From b84a35b7b91eca947f787648ceb361b1d023427b Mon Sep 17 00:00:00 2001
From: Ali Abdalla
Date: Mon, 7 Aug 2023 09:54:36 -0700
Subject: [PATCH] Add icon and link to DuplicateButton (#5111)

* fix icon/link in duplicatebutton

* add changeset

* add changeset

* add icon/link to duplicate

* add changeset

* jsons

---------

Co-authored-by: gradio-pr-bot
---
 .changeset/wise-cooks-itch.md                 |     6 +
 gradio/components/duplicate_button.py         |    18 +
 .../src/routes/changelog/changelog.json       |     4 +-
 js/_website/src/routes/demos/demos.json       |   173 +-
 js/_website/src/routes/docs/docs.json         | 19454 +---------------
 .../guides/json/creating-a-new-component.json |     2 +-
 .../guides/json/guides_by_category.json       |     2 +-
 js/_website/src/routes/version.json           |     2 +-
 8 files changed, 30 insertions(+), 19631 deletions(-)
 create mode 100644 .changeset/wise-cooks-itch.md

diff --git a/.changeset/wise-cooks-itch.md b/.changeset/wise-cooks-itch.md
new file mode 100644
index 0000000000000..7b98c65e26db2
--- /dev/null
+++ b/.changeset/wise-cooks-itch.md
@@ -0,0 +1,6 @@
+---
+"gradio": patch
+"website": patch
+---
+
+fix:Add icon and link to DuplicateButton

diff --git a/gradio/components/duplicate_button.py b/gradio/components/duplicate_button.py
index e4ecc25c9b3d1..c6b8f486a9cf3 100644
--- a/gradio/components/duplicate_button.py
+++ b/gradio/components/duplicate_button.py
@@ -28,6 +28,8 @@ def __init__(
         value: str = "Duplicate Space",
         variant: Literal["primary", "secondary", "stop"] = "secondary",
         size: Literal["sm", "lg"] | None = "sm",
+        icon: str | None = None,
+        link: str | None = None,
         visible: bool = True,
         interactive: bool = True,
         elem_id: str | None = None,
@@ -37,10 +39,26 @@ def __init__(
         _activate: bool = True,
         **kwargs,
     ):
+        """
+        Parameters:
+            value: Default text for the button to display. If callable, the function will be called whenever the app loads to set the initial value of the component.
+            variant: 'primary' for main call-to-action, 'secondary' for a more subdued style, 'stop' for a stop button.
+            size: Size of the button. Can be "sm" or "lg".
+            icon: URL or path to the icon file to display within the button. If None, no icon will be displayed.
+            link: URL to open when the button is clicked. If None, no link will be used.
+            visible: If False, component will be hidden.
+            interactive: If False, the Button will be in a disabled state.
+            elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
+            elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
+            scale: relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.
+            min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.
+ """ super().__init__( value, variant=variant, size=size, + icon=icon, + link=link, visible=visible, interactive=interactive, elem_id=elem_id, diff --git a/js/_website/src/routes/changelog/changelog.json b/js/_website/src/routes/changelog/changelog.json index 5f2ac8ad01067..21795cfcccdfd 100644 --- a/js/_website/src/routes/changelog/changelog.json +++ b/js/_website/src/routes/changelog/changelog.json @@ -1,3 +1 @@ -{ - "content": "# Changelog\n\n## 3.39.0\n\n### Highlights\n\n#### Create Discord Bots from Gradio Apps \ud83e\udd16 ([#4960](https://github.com/gradio-app/gradio/pull/4960) [`46e4ef67`](https://github.com/gradio-app/gradio/commit/46e4ef67d287dd68a91473b73172b29cbad064bc))\n\nWe're excited to announce that Gradio can now automatically create a discord bot from any `gr.ChatInterface` app. \n\nIt's as easy as importing `gradio_client`, connecting to the app, and calling `deploy_discord`!\n\n*\ud83e\udd99 Turning Llama 2 70b into a discord bot \ud83e\udd99*\n```python\nimport gradio_client as grc\ngrc.Client(\"ysharma/Explore_llamav2_with_TGI\").deploy_discord(to_id=\"llama2-70b-discord-bot\")\n```\n\n\n\n#### Getting started with template spaces\n\nTo help get you started, we have created an organization on Hugging Face called [gradio-discord-bots](https://huggingface.co/gradio-discord-bots) with template spaces you can use to turn state of the art LLMs powered by Gradio to discord bots.\n\nCurrently we have template spaces for:\n\n* [Llama-2-70b-chat-hf](https://huggingface.co/spaces/gradio-discord-bots/Llama-2-70b-chat-hf) powered by a FREE Hugging Face Inference Endpoint!\n* [Llama-2-13b-chat-hf](https://huggingface.co/spaces/gradio-discord-bots/Llama-2-13b-chat-hf) powered by Hugging Face Inference Endpoints.\n* [Llama-2-13b-chat-hf](https://huggingface.co/spaces/gradio-discord-bots/llama-2-13b-chat-transformers) powered by Hugging Face transformers.\n* [falcon-7b-instruct](https://huggingface.co/spaces/gradio-discord-bots/falcon-7b-instruct) powered by Hugging Face Inference Endpoints.\n* [gpt-3.5-turbo](https://huggingface.co/spaces/gradio-discord-bots/gpt-35-turbo), powered by openai. Requires an OpenAI key.\n\nBut once again, you can deploy ANY `gr.ChatInterface` app exposed on the internet! So don't hesitate to try it on your own Chatbots.\n\n\u2757\ufe0f Additional Note \u2757\ufe0f: Technically, any gradio app that exposes an api route that takes in a single string and outputs a single string can be deployed to discord. But `gr.ChatInterface` apps naturally lend themselves to discord's chat functionality so we suggest you start with those.\n\n Thanks [@freddyaboulton](https://github.com/freddyaboulton)!\n\n### Features\n\n- [#4995](https://github.com/gradio-app/gradio/pull/4995) [`3f8c210b`](https://github.com/gradio-app/gradio/commit/3f8c210b01ef1ceaaf8ee73be4bf246b5b745bbf) - Implement left and right click in `Gallery` component and show implicit images in `Gallery` grid. Thanks [@hannahblair](https://github.com/hannahblair)!\n- [#4993](https://github.com/gradio-app/gradio/pull/4993) [`dc07a9f9`](https://github.com/gradio-app/gradio/commit/dc07a9f947de44b419d8384987a02dcf94977851) - Bringing back the \"Add download button for audio\" PR by [@leuryr](https://github.com/leuryr). Thanks [@abidlabs](https://github.com/abidlabs)!\n- [#4979](https://github.com/gradio-app/gradio/pull/4979) [`44ac8ad0`](https://github.com/gradio-app/gradio/commit/44ac8ad08d82ea12c503dde5c78f999eb0452de2) - Allow setting sketch color default. 
Thanks [@aliabid94](https://github.com/aliabid94)!\n- [#4985](https://github.com/gradio-app/gradio/pull/4985) [`b74f8453`](https://github.com/gradio-app/gradio/commit/b74f8453034328f0e42da8e41785f5eb039b45d7) - Adds `additional_inputs` to `gr.ChatInterface`. Thanks [@abidlabs](https://github.com/abidlabs)!\n\n### Fixes\n\n- [#4997](https://github.com/gradio-app/gradio/pull/4997) [`41c83070`](https://github.com/gradio-app/gradio/commit/41c83070b01632084e7d29123048a96c1e261407) - Add CSS resets and specifiers to play nice with HF blog. Thanks [@aliabid94](https://github.com/aliabid94)!\n\n## 3.38\n\n### New Features:\n\n- Provide a parameter `animate` (`False` by default) in `gr.make_waveform()` which animates the overlayed waveform by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4918](https://github.com/gradio-app/gradio/pull/4918)\n- Add `show_download_button` param to allow the download button in static Image components to be hidden by [@hannahblair](https://github.com/hannahblair) in [PR 4959](https://github.com/gradio-app/gradio/pull/4959)\n- Added autofocus argument to Textbox by [@aliabid94](https://github.com/aliabid94) in [PR 4978](https://github.com/gradio-app/gradio/pull/4978) \n- The `gr.ChatInterface` UI now converts the \"Submit\" button to a \"Stop\" button in ChatInterface while streaming, which can be used to pause generation. By [@abidlabs](https://github.com/abidlabs) in [PR 4971](https://github.com/gradio-app/gradio/pull/4971).\n- Add a `border_color_accent_subdued` theme variable to add a subdued border color to accented items. This is used by chatbot user messages. Set the value of this variable in `Default` theme to `*primary_200`. By [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4989](https://github.com/gradio-app/gradio/pull/4989)\n- Add default sketch color argument `brush_color`. Also, masks drawn on images are now slightly translucent (and mask color can also be set via brush_color). By [@aliabid94](https://github.com/aliabid94) in [PR 4979](https://github.com/gradio-app/gradio/pull/4979)\n\n### Bug Fixes:\n\n- Fixes `cancels` for generators so that if a generator is canceled before it is complete, subsequent runs of the event do not continue from the previous iteration, but rather start from the beginning. By [@abidlabs](https://github.com/abidlabs) in [PR 4969](https://github.com/gradio-app/gradio/pull/4969).\n- Use `gr.State` in `gr.ChatInterface` to reduce latency by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4976](https://github.com/gradio-app/gradio/pull/4976)\n- Fix bug with `gr.Interface` where component labels inferred from handler parameters were including special args like `gr.Request` or `gr.EventData`. By [@cbensimon](https://github.com/cbensimon) in [PR 4956](https://github.com/gradio-app/gradio/pull/4956)\n\n#\n\n### Other Changes:\n\n- Apply pyright to the `components` directory by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4948](https://github.com/gradio-app/gradio/pull/4948)\n- Improved look of ChatInterface by [@aliabid94](https://github.com/aliabid94) in [PR 4978](https://github.com/gradio-app/gradio/pull/4978) \n\n## 3.37\n\n### New Features:\n\nIntroducing a new `gr.ChatInterface` abstraction, which allows Gradio users to build fully functioning Chat interfaces very easily. The only required parameter is a chat function `fn`, which accepts a (string) user input `message` and a (list of lists) chat `history` and returns a (string) response. 
Here's a toy example:\n\n```py\nimport gradio as gr\n\ndef echo(message, history):\n return message\n\ndemo = gr.ChatInterface(fn=echo, examples=[\"hello\", \"hola\", \"merhaba\"], title=\"Echo Bot\")\ndemo.launch()\n```\n\nWhich produces:\n\n\"image\"\n\nAnd a corresponding easy-to-use API at `/chat`:\n\n\"image\"\n\n\nThe `gr.ChatInterface` abstraction works nicely with various LLM libraries, such as `langchain`. See the [dedicated guide](https://gradio.app/guides/creating-a-chatbot-fast) for more examples using `gr.ChatInterface`. Collective team effort in [PR 4869](https://github.com/gradio-app/gradio/pull/4869)\n\n- Chatbot messages now show hyperlinks to download files uploaded to `gr.Chatbot()` by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4848](https://github.com/gradio-app/gradio/pull/4848)\n- Cached examples now work with generators and async generators by [@abidlabs](https://github.com/abidlabs) in [PR 4927](https://github.com/gradio-app/gradio/pull/4927)\n- Add RTL support to `gr.Markdown`, `gr.Chatbot`, `gr.Textbox` (via the `rtl` boolean parameter) and text-alignment to `gr.Textbox`(via the string `text_align` parameter) by [@abidlabs](https://github.com/abidlabs) in [PR 4933](https://github.com/gradio-app/gradio/pull/4933)\n\nExamples of usage:\n\n```py\nwith gr.Blocks() as demo: \n gr.Textbox(interactive=True, text_align=\"right\")\ndemo.launch()\n```\n\n```py\nwith gr.Blocks() as demo: \n gr.Markdown(\"\u0633\u0644\u0627\u0645\", rtl=True)\ndemo.launch()\n```\n- The `get_api_info` method of `Blocks` now supports layout output components [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4871](https://github.com/gradio-app/gradio/pull/4871)\n\n- Added the support for the new command `gradio environment`to make it easier for people to file bug reports if we shipped an easy command to list the OS, gradio version, and versions of gradio/gradio-client dependencies. bu [@varshneydevansh](https://github.com/varshneydevansh) in [PR 4915](https://github.com/gradio-app/gradio/pull/4915).\n\n### Bug Fixes:\n\n* The `.change()` event is fixed in `Video` and `Image` so that it only fires once by [@abidlabs](https://github.com/abidlabs) in [PR 4793](https://github.com/gradio-app/gradio/pull/4793)\n* The `.change()` event is fixed in `Audio` so that fires when the component value is programmatically updated by [@abidlabs](https://github.com/abidlabs) in [PR 4793](https://github.com/gradio-app/gradio/pull/4793)\n- Add missing `display: flex` property to `Row` so that flex styling is applied to children by [@hannahblair] in [PR 4896](https://github.com/gradio-app/gradio/pull/4896)\n- Fixed bug where `gr.Video` could not preprocess urls by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4904](https://github.com/gradio-app/gradio/pull/4904)\n- Fixed copy button rendering in API page on Safari by [@aliabid94](https://github.com/aliabid94) in [PR 4924](https://github.com/gradio-app/gradio/pull/4924)\n- Fixed `gr.Group` and `container=False`. `container` parameter only available for `Textbox`, `Number`, and `Dropdown`, the only elements where it makes sense. By [@aliabid94](https://github.com/aliabid94) in [PR 4916](https://github.com/gradio-app/gradio/pull/4916)\n- Fixed broken image link in auto-generated `app.py` from `ThemeClass.push_to_hub` by [@deepkyu](https://github.com/deepkyu) in [PR 4944](https://github.com/gradio-app/gradio/pull/4944)\n\n### Other Changes:\n\n- Warning on mobile that if a user leaves the tab, websocket connection may break. 
On broken connection, tries to rejoin queue and displays error conveying connection broke. By [@aliabid94](https://github.com/aliabid94) in [PR 4742](https://github.com/gradio-app/gradio/pull/4742)\n- Remove blocking network calls made before the local URL gets printed - these slow down the display of the local URL, especially when no internet is available. [@aliabid94](https://github.com/aliabid94) in [PR 4905](https://github.com/gradio-app/gradio/pull/4905).\n- Pinned dependencies to major versions to reduce the likelihood of a broken `gradio` due to changes in downstream dependencies by [@abidlabs](https://github.com/abidlabs) in [PR 4885](https://github.com/gradio-app/gradio/pull/4885)\n- Queue `max_size` defaults to parent Blocks `max_thread` when running on Spaces with ZeroGPU hardware. By [@cbensimon](https://github.com/cbensimon) in [PR 4937](https://github.com/gradio-app/gradio/pull/4937)\n\n### Breaking Changes:\n\nMotivated by the release of `pydantic==2.0`, which included breaking changes that broke a large number of Gradio apps, we've pinned many gradio dependencies. Note that pinned dependencies can cause downstream conflicts, so this may be a breaking change. That being said, we've kept the pins pretty loose, and we're expecting change to be better for the long-term stability of Gradio apps.\n\n## 3.36.1\n\n### New Features:\n\n- Hotfix to support pydantic v1 and v2 by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4835](https://github.com/gradio-app/gradio/pull/4835)\n\n### Bug Fixes:\n\n- Fix bug where `gr.File` change event was not triggered when the value was changed by another event by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4811](https://github.com/gradio-app/gradio/pull/4811)\n\n#\n\n#\n\n## 3.36.0\n\n### New Features:\n\n- The `gr.Video`, `gr.Audio`, `gr.Image`, `gr.Chatbot`, and `gr.Gallery` components now include a share icon when deployed on Spaces. This behavior can be modified by setting the `show_share_button` parameter in the component classes. by [@aliabid94](https://github.com/aliabid94) in [PR 4651](https://github.com/gradio-app/gradio/pull/4651)\n- Allow the web component `space`, `src`, and `host` attributes to be updated dynamically by [@pngwn](https://github.com/pngwn) in [PR 4461](https://github.com/gradio-app/gradio/pull/4461)\n- Suggestion for Spaces Duplication built into Gradio, by [@aliabid94](https://github.com/aliabid94) in [PR 4458](https://github.com/gradio-app/gradio/pull/4458)\n- The `api_name` parameter now accepts `False` as a value, which means it does not show up in named or unnamed endpoints. By [@abidlabs](https://github.com/aliabid94) in [PR 4683](https://github.com/gradio-app/gradio/pull/4683)\n- Added support for `pathlib.Path` in `gr.Video`, `gr.Gallery`, and `gr.Chatbot` by [sunilkumardash9](https://github.com/sunilkumardash9) in [PR 4581](https://github.com/gradio-app/gradio/pull/4581).\n\n### Bug Fixes:\n\n- Updated components with `info` attribute to update when `update()` is called on them. 
by [@jebarpg](https://github.com/jebarpg) in [PR 4715](https://github.com/gradio-app/gradio/pull/4715).\n- Ensure the `Image` components undo button works mode is `mask` or `color-sketch` by [@amyorz](https://github.com/AmyOrz) in [PR 4692](https://github.com/gradio-app/gradio/pull/4692)\n- Load the iframe resizer external asset asynchronously, by [@akx](https://github.com/akx) in [PR 4336](https://github.com/gradio-app/gradio/pull/4336)\n- Restored missing imports in `gr.components` by [@abidlabs](https://github.com/abidlabs) in [PR 4566](https://github.com/gradio-app/gradio/pull/4566)\n- Fix bug where `select` event was not triggered in `gr.Gallery` if `height` was set to be large with `allow_preview=False` by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4551](https://github.com/gradio-app/gradio/pull/4551)\n- Fix bug where setting `visible=False` in `gr.Group` event did not work by [@abidlabs](https://github.com/abidlabs) in [PR 4567](https://github.com/gradio-app/gradio/pull/4567)\n- Fix `make_waveform` to work with paths that contain spaces [@akx](https://github.com/akx) in [PR 4570](https://github.com/gradio-app/gradio/pull/4570) & [PR 4578](https://github.com/gradio-app/gradio/pull/4578)\n- Send captured data in `stop_recording` event for `gr.Audio` and `gr.Video` components by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4554](https://github.com/gradio-app/gradio/pull/4554)\n- Fix bug in `gr.Gallery` where `height` and `object_fit` parameters where being ignored by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4576](https://github.com/gradio-app/gradio/pull/4576)\n- Fixes an HTML sanitization issue in DOMPurify where links in markdown were not opening in a new window by [@hannahblair] in [PR 4577](https://github.com/gradio-app/gradio/pull/4577)\n- Fixed Dropdown height rendering in Columns by [@aliabid94](https://github.com/aliabid94) in [PR 4584](https://github.com/gradio-app/gradio/pull/4584) \n- Fixed bug where `AnnotatedImage` css styling was causing the annotation masks to not be displayed correctly by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4628](https://github.com/gradio-app/gradio/pull/4628)\n- Ensure that Gradio does not silently fail when running on a port that is occupied by [@abidlabs](https://github.com/abidlabs) in [PR 4624](https://github.com/gradio-app/gradio/pull/4624).\n- Fix double upload bug that caused lag in file uploads by [@aliabid94](https://github.com/aliabid94) in [PR 4661](https://github.com/gradio-app/gradio/pull/4661)\n- `Progress` component now appears even when no `iterable` is specified in `tqdm` constructor by [@itrushkin](https://github.com/itrushkin) in [PR 4475](https://github.com/gradio-app/gradio/pull/4475)\n- Deprecation warnings now point at the user code using those deprecated features, instead of Gradio internals, by (https://github.com/akx) in [PR 4694](https://github.com/gradio-app/gradio/pull/4694)\n- Adapt column widths in gr.Examples based on content by [@pngwn](https://github.com/pngwn) & [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4700](https://github.com/gradio-app/gradio/pull/4700)\n- The `plot` parameter deprecation warnings should now only be emitted for `Image` components by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4709](https://github.com/gradio-app/gradio/pull/4709)\n- Removed uncessessary `type` deprecation warning by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 
4709](https://github.com/gradio-app/gradio/pull/4709)\n- Ensure Audio autoplays works when `autoplay=True` and the video source is dynamically updated [@pngwn](https://github.com/pngwn) in [PR 4705](https://github.com/gradio-app/gradio/pull/4705)\n- When an error modal is shown in spaces, ensure we scroll to the top so it can be seen by [@pngwn](https://github.com/pngwn) in [PR 4712](https://github.com/gradio-app/gradio/pull/4712)\n- Update depedencies by [@pngwn](https://github.com/pngwn) in [PR 4675](https://github.com/gradio-app/gradio/pull/4675)\n- Fixes `gr.Dropdown` being cutoff at the bottom by [@abidlabs](https://github.com/abidlabs) in [PR 4691](https://github.com/gradio-app/gradio/pull/4691).\n- Scroll top when clicking \"View API\" in spaces by [@pngwn](https://github.com/pngwn) in [PR 4714](https://github.com/gradio-app/gradio/pull/4714)\n- Fix bug where `show_label` was hiding the entire component for `gr.Label` by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4713](https://github.com/gradio-app/gradio/pull/4713)\n- Don't crash when uploaded image has broken EXIF data, by [@akx](https://github.com/akx) in [PR 4764](https://github.com/gradio-app/gradio/pull/4764)\n- Place toast messages at the top of the screen by [@pngwn](https://github.com/pngwn) in [PR 4796](https://github.com/gradio-app/gradio/pull/4796)\n- Fix regressed styling of Login page when auth is enabled by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4797](https://github.com/gradio-app/gradio/pull/4797)\n- Prevent broken scrolling to output on Spaces by [@aliabid94](https://github.com/aliabid94) in [PR 4822](https://github.com/gradio-app/gradio/pull/4822)\n\n### Other Changes:\n\n- Add `.git-blame-ignore-revs` by [@akx](https://github.com/akx) in [PR 4586](https://github.com/gradio-app/gradio/pull/4586)\n- Update frontend dependencies in [PR 4601](https://github.com/gradio-app/gradio/pull/4601)\n- Use `typing.Literal` where possible in gradio library and client by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4608](https://github.com/gradio-app/gradio/pull/4608)\n- Remove unnecessary mock json files for frontend E2E tests by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4625](https://github.com/gradio-app/gradio/pull/4625)\n- Update dependencies by [@pngwn](https://github.com/pngwn) in [PR 4643](https://github.com/gradio-app/gradio/pull/4643)\n- The theme builder now launches successfully, and the API docs are cleaned up. 
By [@abidlabs](https://github.com/aliabid94) in [PR 4683](https://github.com/gradio-app/gradio/pull/4683)\n- Remove `cleared_value` from some components as its no longer used internally by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4685](https://github.com/gradio-app/gradio/pull/4685)\n- Better errors when you define two Blocks and reference components in one Blocks from the events in the other Blocks [@abidlabs](https://github.com/abidlabs) in [PR 4738](https://github.com/gradio-app/gradio/pull/4738).\n- Better message when share link is not created by [@abidlabs](https://github.com/abidlabs) in [PR 4773](https://github.com/gradio-app/gradio/pull/4773).\n- Improve accessibility around selected images in gr.Gallery component by [@hannahblair](https://github.com/hannahblair) in [PR 4790](https://github.com/gradio-app/gradio/pull/4790)\n\n### Breaking Changes:\n\n[PR 4683](https://github.com/gradio-app/gradio/pull/4683) removes the explict named endpoint \"load_examples\" from gr.Interface that was introduced in [PR 4456](https://github.com/gradio-app/gradio/pull/4456).\n\n## 3.35.2\n\n#\n\n### Bug Fixes:\n\n- Fix chatbot streaming by [@aliabid94](https://github.com/aliabid94) in [PR 4537](https://github.com/gradio-app/gradio/pull/4537)\n- Fix chatbot height and scrolling by [@aliabid94](https://github.com/aliabid94) in [PR 4540](https://github.com/gradio-app/gradio/pull/4540)\n\n#\n\n#\n\n## 3.35.1\n\n#\n\n### Bug Fixes:\n\n- Fix chatbot streaming by [@aliabid94](https://github.com/aliabid94) in [PR 4537](https://github.com/gradio-app/gradio/pull/4537)\n- Fix error modal position and text size by [@pngwn](https://github.com/pngwn) in [PR 4538](https://github.com/gradio-app/gradio/pull/4538).\n\n#\n\n#\n\n## 3.35.0\n\n### New Features:\n\n- A `gr.ClearButton` which allows users to easily clear the values of components by [@abidlabs](https://github.com/abidlabs) in [PR 4456](https://github.com/gradio-app/gradio/pull/4456)\n\nExample usage:\n\n```py\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n chatbot = gr.Chatbot([(\"Hello\", \"How are you?\")])\n with gr.Row():\n textbox = gr.Textbox(scale=3, interactive=True)\n gr.ClearButton([textbox, chatbot], scale=1)\n\ndemo.launch()\n```\n\n- Min and max value for gr.Number by [@artegoser](https://github.com/artegoser) and [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3991](https://github.com/gradio-app/gradio/pull/3991)\n- Add `start_recording` and `stop_recording` events to `Video` and `Audio` components by [@pngwn](https://github.com/pngwn) in [PR 4422](https://github.com/gradio-app/gradio/pull/4422)\n- Allow any function to generate an error message and allow multiple messages to appear at a time. 
Other error modal improvements such as auto dismiss after a time limit and a new layout on mobile [@pngwn](https://github.com/pngwn) in [PR 4459](https://github.com/gradio-app/gradio/pull/4459).\n- Add `autoplay` kwarg to `Video` and `Audio` components by [@pngwn](https://github.com/pngwn) in [PR 4453](https://github.com/gradio-app/gradio/pull/4453)\n- Add `allow_preview` parameter to `Gallery` to control whether a detailed preview is displayed on click by\n[@freddyaboulton](https://github.com/freddyaboulton) in [PR 4470](https://github.com/gradio-app/gradio/pull/4470)\n- Add `latex_delimiters` parameter to `Chatbot` to control the delimiters used for LaTeX and to disable LaTeX in the `Chatbot` by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4516](https://github.com/gradio-app/gradio/pull/4516)\n- Can now issue `gr.Warning` and `gr.Info` modals. Simply put the code `gr.Warning(\"Your warning message\")` or `gr.Info(\"Your info message\")` as a standalone line in your function. By [@aliabid94](https://github.com/aliabid94) in [PR 4518](https://github.com/gradio-app/gradio/pull/4518). \n\nExample:\n```python\ndef start_process(name):\n gr.Info(\"Starting process\")\n if name is None:\n gr.Warning(\"Name is empty\")\n ...\n if success == False:\n raise gr.Error(\"Process failed\")\n```\n\n\n### Bug Fixes:\n\n- Add support for PAUSED state in the JS client by [@abidlabs](https://github.com/abidlabs) in [PR 4438](https://github.com/gradio-app/gradio/pull/4438)\n- Ensure Tabs only occupy the space required by [@pngwn](https://github.com/pngwn) in [PR 4419](https://github.com/gradio-app/gradio/pull/4419)\n- Ensure components have the correct empty sizes to prevent empty containers from collapsing by [@pngwn](https://github.com/pngwn) in [PR 4447](https://github.com/gradio-app/gradio/pull/4447).\n- Frontend code no longer crashes when there is a relative URL in an `` element, by [@akx](https://github.com/akx) in [PR 4449](https://github.com/gradio-app/gradio/pull/4449).\n- Fix bug where setting `format='mp4'` on a video component would cause the function to error out if the uploaded video was not playable by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4467](https://github.com/gradio-app/gradio/pull/4467)\n- Fix `_js` parameter to work even without backend function, by [@aliabid94](https://github.com/aliabid94) in [PR 4486](https://github.com/gradio-app/gradio/pull/4486).\n- Fix new line issue with `gr.Chatbot()` by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4491](https://github.com/gradio-app/gradio/pull/4491)\n- Fixes issue with Clear button not working for `Label` component by [@abidlabs](https://github.com/abidlabs) in [PR 4456](https://github.com/gradio-app/gradio/pull/4456)\n- Restores the ability to pass in a tuple (sample rate, audio array) to gr.Audio() by [@abidlabs](https://github.com/abidlabs) in [PR 4525](https://github.com/gradio-app/gradio/pull/4525)\n- Ensure code is correctly formatted and copy button is always present in Chatbot by [@pngwn](https://github.com/pngwn) in [PR 4527](https://github.com/gradio-app/gradio/pull/4527)\n- `show_label` will not automatically be set to `True` in `gr.BarPlot.update` by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4531](https://github.com/gradio-app/gradio/pull/4531)\n- `gr.BarPlot` group text now respects darkmode by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4531](https://github.com/gradio-app/gradio/pull/4531)\n- Fix dispatched errors from within components 
[@aliabid94](https://github.com/aliabid94) in [PR 4786](https://github.com/gradio-app/gradio/pull/4786)\n\n### Other Changes:\n\n- Change styling of status and toast error components by [@hannahblair](https://github.com/hannahblair) in [PR 4454](https://github.com/gradio-app/gradio/pull/4454).\n- Clean up unnecessary `new Promise()`s by [@akx](https://github.com/akx) in [PR 4442](https://github.com/gradio-app/gradio/pull/4442).\n- Minor UI cleanup for Examples and Dataframe components [@aliabid94](https://github.com/aliabid94) in [PR 4455](https://github.com/gradio-app/gradio/pull/4455). \n- Minor UI cleanup for Examples and Dataframe components [@aliabid94](https://github.com/aliabid94) in [PR 4455](https://github.com/gradio-app/gradio/pull/4455).\n- Add Catalan translation [@jordimas](https://github.com/jordimas) in [PR 4483](https://github.com/gradio-app/gradio/pull/4483).\n- The API endpoint that loads examples upon click has been given an explicit name (\"/load_examples\") by [@abidlabs](https://github.com/abidlabs) in [PR 4456](https://github.com/gradio-app/gradio/pull/4456).\n- Allows configuration of FastAPI app when calling `mount_gradio_app`, by [@charlesfrye](https://github.com/charlesfrye) in [PR4519](https://github.com/gradio-app/gradio/pull/4519).\n\n### Breaking Changes:\n\n- The behavior of the `Clear` button has been changed for `Slider`, `CheckboxGroup`, `Radio`, `Dropdown` components by [@abidlabs](https://github.com/abidlabs) in [PR 4456](https://github.com/gradio-app/gradio/pull/4456). The Clear button now sets the value of these components to be empty as opposed to the original default set by the developer. This is to make them in line with the rest of the Gradio components.\n- Python 3.7 end of life is June 27 2023. Gradio will no longer support python 3.7 by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4484](https://github.com/gradio-app/gradio/pull/4484)\n- Removed `$` as a default LaTeX delimiter for the `Chatbot` by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4516](https://github.com/gradio-app/gradio/pull/4516). 
The specific LaTeX delimeters can be set using the new `latex_delimiters` parameter in `Chatbot`.\n\n## 3.34.0\n\n### New Features:\n\n- The `gr.UploadButton` component now supports the `variant` and `interactive` parameters by [@abidlabs](https://github.com/abidlabs) in [PR 4436](https://github.com/gradio-app/gradio/pull/4436).\n\n### Bug Fixes:\n\n- Remove target=\"\\_blank\" override on anchor tags with internal targets by [@hannahblair](https://github.com/hannahblair) in [PR 4405](https://github.com/gradio-app/gradio/pull/4405)\n- Fixed bug where `gr.File(file_count='multiple')` could not be cached as output by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4421](https://github.com/gradio-app/gradio/pull/4421)\n- Restricts the domains that can be proxied via `/proxy` route by [@abidlabs](https://github.com/abidlabs) in [PR 4406](https://github.com/gradio-app/gradio/pull/4406).\n- Fixes issue where `gr.UploadButton` could not be used to upload the same file twice by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4437](https://github.com/gradio-app/gradio/pull/4437)\n- Fixes bug where `/proxy` route was being incorrectly constructed by the frontend by [@abidlabs](https://github.com/abidlabs) in [PR 4430](https://github.com/gradio-app/gradio/pull/4430).\n- Fix z-index of status component by [@hannahblair](https://github.com/hannahblair) in [PR 4429](https://github.com/gradio-app/gradio/pull/4429)\n- Fix video rendering in Safari by [@aliabid94](https://github.com/aliabid94) in [PR 4433](https://github.com/gradio-app/gradio/pull/4433).\n- The output directory for files downloaded when calling Blocks as a function is now set to a temporary directory by default (instead of the working directory in some cases) by [@abidlabs](https://github.com/abidlabs) in [PR 4501](https://github.com/gradio-app/gradio/pull/4501)\n\n### Other Changes:\n\n- When running on Spaces, handler functions will be transformed by the [PySpaces](https://pypi.org/project/spaces/) library in order to make them work with specific hardware. It will have no effect on standalone Gradio apps or regular Gradio Spaces and can be globally deactivated as follows : `import spaces; spaces.disable_gradio_auto_wrap()` by [@cbensimon](https://github.com/cbensimon) in [PR 4389](https://github.com/gradio-app/gradio/pull/4389).\n- Deprecated `.style` parameter and moved arguments to constructor. Added support for `.update()` to all arguments initially in style. Added `scale` and `min_width` support to every Component. 
By [@aliabid94](https://github.com/aliabid94) in [PR 4374](https://github.com/gradio-app/gradio/pull/4374)\n\n#\n\n## 3.33.1\n\n#\n\n### Bug Fixes:\n\n- Allow `every` to work with generators by [@dkjshk](https://github.com/dkjshk) in [PR 4434](https://github.com/gradio-app/gradio/pull/4434)\n- Fix z-index of status component by [@hannahblair](https://github.com/hannahblair) in [PR 4429](https://github.com/gradio-app/gradio/pull/4429)\n- Allow gradio to work offline, by [@aliabid94](https://github.com/aliabid94) in [PR 4398](https://github.com/gradio-app/gradio/pull/4398).\n- Fixed `validate_url` to check for 403 errors and use a GET request in place of a HEAD by [@alvindaiyan](https://github.com/alvindaiyan) in [PR 4388](https://github.com/gradio-app/gradio/pull/4388).\n\n### Other Changes:\n\n- More explicit error message when share link binary is blocked by antivirus by [@abidlabs](https://github.com/abidlabs) in [PR 4380](https://github.com/gradio-app/gradio/pull/4380).\n\n#\n\n## 3.33.0\n\n### New Features:\n\n- Introduced `gradio deploy` to launch a Gradio app to Spaces directly from your terminal. By [@aliabid94](https://github.com/aliabid94) in [PR 4033](https://github.com/gradio-app/gradio/pull/4033).\n- Introduce `show_progress='corner'` argument to event listeners, which will not cover the output components with the progress animation, but instead show it in the corner of the components. By [@aliabid94](https://github.com/aliabid94) in [PR 4396](https://github.com/gradio-app/gradio/pull/4396).\n\n### Bug Fixes:\n\n- Fix bug where Label change event was triggering itself by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4371](https://github.com/gradio-app/gradio/pull/4371)\n- Make `Blocks.load` behave like other event listeners (allows chaining `then` off of it) [@anentropic](https://github.com/anentropic/) in [PR 4304](https://github.com/gradio-app/gradio/pull/4304)\n- Respect `interactive=True` in output components of a `gr.Interface` by [@abidlabs](https://github.com/abidlabs) in [PR 4356](https://github.com/gradio-app/gradio/pull/4356).\n- Remove unused frontend code by [@akx](https://github.com/akx) in [PR 4275](https://github.com/gradio-app/gradio/pull/4275)\n- Fixes favicon path on Windows by [@abidlabs](https://github.com/abidlabs) in [PR 4369](https://github.com/gradio-app/gradio/pull/4369).\n- Prevent path traversal in `/file` routes by [@abidlabs](https://github.com/abidlabs) in [PR 4370](https://github.com/gradio-app/gradio/pull/4370).\n- Do not send HF token to other domains via `/proxy` route by [@abidlabs](https://github.com/abidlabs) in [PR 4368](https://github.com/gradio-app/gradio/pull/4368).\n- Replace default `markedjs` sanitize function with DOMPurify sanitizer for `gr.Chatbot()` by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4360](https://github.com/gradio-app/gradio/pull/4360)\n- Prevent the creation of duplicate copy buttons in the chatbot and ensure copy buttons work in non-secure contexts by [@binary-husky](https://github.com/binary-husky) in [PR 4350](https://github.com/gradio-app/gradio/pull/4350).\n\n### Other Changes:\n\n- Remove flicker of loading bar by adding opacity transition, by [@aliabid94](https://github.com/aliabid94) in [PR 4349](https://github.com/gradio-app/gradio/pull/4349).\n- Performance optimization in the frontend's Blocks code by [@akx](https://github.com/akx) in [PR 4334](https://github.com/gradio-app/gradio/pull/4334)\n- Upgrade the pnpm lock file format version from v6.0 to v6.1 by 
[@whitphx](https://github.com/whitphx) in [PR 4393](https://github.com/gradio-app/gradio/pull/4393)\n\n### Breaking Changes:\n\n- The `/file=` route no longer allows accessing dotfiles or files in \"dot directories\" by [@akx](https://github.com/akx) in [PR 4303](https://github.com/gradio-app/gradio/pull/4303)\n\n## 3.32.0\n\n### New Features:\n\n- `Interface.launch()` and `Blocks.launch()` now accept an `app_kwargs` argument to allow customizing the configuration of the underlying FastAPI app, by [@akx](https://github.com/akx) in [PR 4282](https://github.com/gradio-app/gradio/pull/4282)\n\n### Bug Fixes:\n\n- Fixed Gallery/AnnotatedImage components not respecting GRADIO_DEFAULT_DIR variable by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4256](https://github.com/gradio-app/gradio/pull/4256)\n- Fixed Gallery/AnnotatedImage components resaving identical images by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4256](https://github.com/gradio-app/gradio/pull/4256)\n- Fixed Audio/Video/File components creating empty tempfiles on each run by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4256](https://github.com/gradio-app/gradio/pull/4256)\n- Fixed the behavior of the `run_on_click` parameter in `gr.Examples` by [@abidlabs](https://github.com/abidlabs) in [PR 4258](https://github.com/gradio-app/gradio/pull/4258).\n- Ensure error modal displays when the queue is enabled by [@pngwn](https://github.com/pngwn) in [PR 4273](https://github.com/gradio-app/gradio/pull/4273)\n- Ensure js client respcts the full root when making requests to the server by [@pngwn](https://github.com/pngwn) in [PR 4271](https://github.com/gradio-app/gradio/pull/4271)\n\n### Other Changes:\n\n- Refactor web component `initial_height` attribute by [@whitphx](https://github.com/whitphx) in [PR 4223](https://github.com/gradio-app/gradio/pull/4223)\n- Relocate `mount_css` fn to remove circular dependency [@whitphx](https://github.com/whitphx) in [PR 4222](https://github.com/gradio-app/gradio/pull/4222)\n- Upgrade Black to 23.3 by [@akx](https://github.com/akx) in [PR 4259](https://github.com/gradio-app/gradio/pull/4259)\n- Add frontend LaTeX support in `gr.Chatbot()` using `KaTeX` by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4285](https://github.com/gradio-app/gradio/pull/4285).\n\n#\n\n## 3.31.0\n\n### New Features:\n\n- The reloader command (`gradio app.py`) can now accept command line arguments by [@micky2be](https://github.com/micky2be) in [PR 4119](https://github.com/gradio-app/gradio/pull/4119)\n- Added `format` argument to `Audio` component by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4178](https://github.com/gradio-app/gradio/pull/4178)\n- Add JS client code snippets to use via api page by [@aliabd](https://github.com/aliabd) in [PR 3927](https://github.com/gradio-app/gradio/pull/3927).\n- Update to the JS client by [@pngwn](https://github.com/pngwn) in [PR 4202](https://github.com/gradio-app/gradio/pull/4202)\n\n### Bug Fixes:\n\n- Fix \"TypeError: issubclass() arg 1 must be a class\" When use Optional[Types] by [@lingfengchencn](https://github.com/lingfengchencn) in [PR 4200](https://github.com/gradio-app/gradio/pull/4200).\n- Gradio will no longer send any analytics or call home if analytics are disabled with the GRADIO_ANALYTICS_ENABLED environment variable. 
By [@akx](https://github.com/akx) in [PR 4194](https://github.com/gradio-app/gradio/pull/4194) and [PR 4236](https://github.com/gradio-app/gradio/pull/4236)\n- The deprecation warnings for kwargs now show the actual stack level for the invocation, by [@akx](https://github.com/akx) in [PR 4203](https://github.com/gradio-app/gradio/pull/4203).\n- Fix \"TypeError: issubclass() arg 1 must be a class\" When use Optional[Types] by [@lingfengchencn](https://github.com/lingfengchencn) in [PR 4200](https://github.com/gradio-app/gradio/pull/4200).\n- Ensure cancelling functions work correctly by [@pngwn](https://github.com/pngwn) in [PR 4225](https://github.com/gradio-app/gradio/pull/4225)\n- Fixes a bug with typing.get_type_hints() on Python 3.9 by [@abidlabs](https://github.com/abidlabs) in [PR 4228](https://github.com/gradio-app/gradio/pull/4228).\n- Fixes JSONDecodeError by [@davidai](https://github.com/davidai) in [PR 4241](https://github.com/gradio-app/gradio/pull/4241)\n- Fix `chatbot_dialogpt` demo by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4238](https://github.com/gradio-app/gradio/pull/4238).\n\n### Other Changes:\n\n- Change `gr.Chatbot()` markdown parsing to frontend using `marked` library and `prism` by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4150](https://github.com/gradio-app/gradio/pull/4150)\n- Update the js client by [@pngwn](https://github.com/pngwn) in [PR 3899](https://github.com/gradio-app/gradio/pull/3899)\n- Fix documentation for the shape of the numpy array produced by the `Image` component by [@der3318](https://github.com/der3318) in [PR 4204](https://github.com/gradio-app/gradio/pull/4204).\n- Updates the timeout for websocket messaging from 1 second to 5 seconds by [@abidlabs](https://github.com/abidlabs) in [PR 4235](https://github.com/gradio-app/gradio/pull/4235)\n\n#\n\n## 3.30.0\n\n### New Features:\n\n- Adds a `root_path` parameter to `launch()` that allows running Gradio applications on subpaths (e.g. 
www.example.com/app) behind a proxy, by [@abidlabs](https://github.com/abidlabs) in [PR 4133](https://github.com/gradio-app/gradio/pull/4133)\n- Fix dropdown change listener to trigger on change when updated as an output by [@aliabid94](https://github.com/aliabid94) in [PR 4128](https://github.com/gradio-app/gradio/pull/4128).\n- Add `.input` event listener, which is only triggered when a user changes the component value (as compared to `.change`, which is also triggered when a component updates as the result of a function trigger), by [@aliabid94](https://github.com/aliabid94) in [PR 4157](https://github.com/gradio-app/gradio/pull/4157).\n\n### Bug Fixes:\n\n- Records username when flagging by [@abidlabs](https://github.com/abidlabs) in [PR 4135](https://github.com/gradio-app/gradio/pull/4135)\n- Fix website build issue by [@aliabd](https://github.com/aliabd) in [PR 4142](https://github.com/gradio-app/gradio/pull/4142)\n- Fix lang agnostic type info for `gr.File(file_count='multiple')` output components by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4153](https://github.com/gradio-app/gradio/pull/4153)\n\n#\n\n#\n\n## 3.29.0\n\n### New Features:\n\n- Returning language agnostic types in the `/info` route by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4039](https://github.com/gradio-app/gradio/pull/4039)\n\n### Bug Fixes:\n\n- Allow users to upload audio files in Audio component on iOS by by [@aliabid94](https://github.com/aliabid94) in [PR 4071](https://github.com/gradio-app/gradio/pull/4071).\n- Fixes the gradio theme builder error that appeared on launch by [@aliabid94](https://github.com/aliabid94) and [@abidlabs](https://github.com/abidlabs) in [PR 4080](https://github.com/gradio-app/gradio/pull/4080)\n- Keep Accordion content in DOM by [@aliabid94](https://github.com/aliabid94) in [PR 4070](https://github.com/gradio-app/gradio/pull/4073)\n- Fixed bug where type hints in functions caused the event handler to crash by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4068](https://github.com/gradio-app/gradio/pull/4068)\n- Fix dropdown default value not appearing by [@aliabid94](https://github.com/aliabid94) in [PR 4072](https://github.com/gradio-app/gradio/pull/4072).\n- Soft theme label color fix by [@aliabid94](https://github.com/aliabid94) in [PR 4070](https://github.com/gradio-app/gradio/pull/4070)\n- Fix `gr.Slider` `release` event not triggering on mobile by [@space-nuko](https://github.com/space-nuko) in [PR 4098](https://github.com/gradio-app/gradio/pull/4098)\n- Removes extraneous `State` component info from the `/info` route by [@abidlabs](https://github.com/freddyaboulton) in [PR 4107](https://github.com/gradio-app/gradio/pull/4107)\n- Make .then() work even if first event fails by [@aliabid94](https://github.com/aliabid94) in [PR 4115](https://github.com/gradio-app/gradio/pull/4115).\n\n#\n\n#\n\n#\n\n### Full Changelog:\n\n- Allow users to submit with enter in Interfaces with textbox / number inputs [@aliabid94](https://github.com/aliabid94) in [PR 4090](https://github.com/gradio-app/gradio/pull/4090).\n- Updates gradio's requirements.txt to requires uvicorn>=0.14.0 by [@abidlabs](https://github.com/abidlabs) in [PR 4086](https://github.com/gradio-app/gradio/pull/4086)\n- Updates some error messaging by [@abidlabs](https://github.com/abidlabs) in [PR 4086](https://github.com/gradio-app/gradio/pull/4086)\n- Renames simplified Chinese translation file from `zh-cn.json` to `zh-CN.json` by [@abidlabs](https://github.com/abidlabs) in 
[PR 4086](https://github.com/gradio-app/gradio/pull/4086)\n\n#\n\n## 3.28.3\n\n#\n\n### Bug Fixes:\n\n- Fixes issue with indentation in `gr.Code()` component with streaming by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4043](https://github.com/gradio-app/gradio/pull/4043)\n\n#\n\n#\n\n#\n\n#\n\n#\n\n## 3.28.2\n\n### Bug Fixes\n\n- Code component visual updates by [@pngwn](https://github.com/pngwn) in [PR 4051](https://github.com/gradio-app/gradio/pull/4051)\n\n### New Features:\n\n- Add support for `visual-question-answering`, `document-question-answering`, and `image-to-text` using `gr.Interface.load(\"models/...\")` and `gr.Interface.from_pipeline` by [@osanseviero](https://github.com/osanseviero) in [PR 3887](https://github.com/gradio-app/gradio/pull/3887)\n- Add code block support in `gr.Chatbot()`, by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4048](https://github.com/gradio-app/gradio/pull/4048)\n- Adds the ability to blocklist filepaths (and also improves the allowlist mechanism) by [@abidlabs](https://github.com/abidlabs) in [PR 4047](https://github.com/gradio-app/gradio/pull/4047).\n- Adds the ability to specify the upload directory via an environment variable by [@abidlabs](https://github.com/abidlabs) in [PR 4047](https://github.com/gradio-app/gradio/pull/4047).\n\n### Bug Fixes:\n\n- Fixes issue with `matplotlib` not rendering correctly if the backend was not set to `Agg` by [@abidlabs](https://github.com/abidlabs) in [PR 4029](https://github.com/gradio-app/gradio/pull/4029)\n- Fixes bug where rendering the same `gr.State` across different Interfaces/Blocks within larger Blocks would not work by [@abidlabs](https://github.com/abidlabs) in [PR 4030](https://github.com/gradio-app/gradio/pull/4030)\n- Code component visual updates by [@pngwn](https://github.com/pngwn) in [PR 4051](https://github.com/gradio-app/gradio/pull/4051)\n\n### Documentation Changes:\n\n- Adds a Guide on how to use the Python Client within a FastAPI app, by [@abidlabs](https://github.com/abidlabs) in [PR 3892](https://github.com/gradio-app/gradio/pull/3892)\n\n#\n\n### Breaking Changes:\n\n- `gr.HuggingFaceDatasetSaver` behavior changed internally. The `flagging/` folder is not a `.git/` folder anymore when using it. `organization` parameter is now ignored in favor of passing a full dataset id as `dataset_name` (e.g. `\"username/my-dataset\"`).\n- New lines (`\\n`) are not automatically converted to `
<br>` in `gr.Markdown()` or `gr.Chatbot()`. For multiple new lines, a developer must add multiple `<br>
` tags.\n\n### Full Changelog:\n\n- Safer version of `gr.HuggingFaceDatasetSaver` using HTTP methods instead of git pull/push by [@Wauplin](https://github.com/Wauplin) in [PR 3973](https://github.com/gradio-app/gradio/pull/3973)\n\n#\n\n## 3.28.1\n\n### New Features:\n\n- Add a \"clear mask\" button to `gr.Image` sketch modes, by [@space-nuko](https://github.com/space-nuko) in [PR 3615](https://github.com/gradio-app/gradio/pull/3615)\n\n### Bug Fixes:\n\n- Fix dropdown default value not appearing by [@aliabid94](https://github.com/aliabid94) in [PR 3996](https://github.com/gradio-app/gradio/pull/3996).\n- Fix faded coloring of output textboxes in iOS / Safari by [@aliabid94](https://github.com/aliabid94) in [PR 3993](https://github.com/gradio-app/gradio/pull/3993)\n\n#\n\n### Testing and Infrastructure Changes:\n\n- CI: Simplified Python CI workflow by [@akx](https://github.com/akx) in [PR 3982](https://github.com/gradio-app/gradio/pull/3982)\n- Upgrade pyright to 1.1.305 by [@akx](https://github.com/akx) in [PR 4042](https://github.com/gradio-app/gradio/pull/4042)\n- More Ruff rules are enabled and lint errors fixed by [@akx](https://github.com/akx) in [PR 4038](https://github.com/gradio-app/gradio/pull/4038)\n\n#\n\n#\n\n#\n\n## 3.28.0\n\n### Bug Fixes:\n\n- Fix duplicate play commands in full-screen mode of 'video'. by [@tomchang25](https://github.com/tomchang25) in [PR 3968](https://github.com/gradio-app/gradio/pull/3968).\n- Fix the issue of the UI stuck caused by the 'selected' of DataFrame not being reset. by [@tomchang25](https://github.com/tomchang25) in [PR 3916](https://github.com/gradio-app/gradio/pull/3916).\n- Fix issue where `gr.Video()` would not work inside a `gr.Tab()` by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3891](https://github.com/gradio-app/gradio/pull/3891)\n- Fixed issue with old_value check in File. 
by [@tomchang25](https://github.com/tomchang25) in [PR 3859](https://github.com/gradio-app/gradio/pull/3859).\n- Fixed bug where all bokeh plots appeared in the same div by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3896](https://github.com/gradio-app/gradio/pull/3896)\n- Fixed image outputs to automatically take full output image height, unless explicitly set, by [@aliabid94](https://github.com/aliabid94) in [PR 3905](https://github.com/gradio-app/gradio/pull/3905)\n- Fix issue in `gr.Gallery()` where setting height causes aspect ratio of images to collapse by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3830](https://github.com/gradio-app/gradio/pull/3830)\n- Fix issue where requesting for a non-existing file would trigger a 500 error by [@micky2be](https://github.com/micky2be) in `[PR 3895](https://github.com/gradio-app/gradio/pull/3895)`.\n- Fix bugs with abspath about symlinks, and unresolvable path on Windows by [@micky2be](https://github.com/micky2be) in `[PR 3895](https://github.com/gradio-app/gradio/pull/3895)`.\n- Fixes type in client `Status` enum by [@10zinten](https://github.com/10zinten) in [PR 3931](https://github.com/gradio-app/gradio/pull/3931)\n- Fix `gr.ChatBot` to handle image url [tye-singwa](https://github.com/tye-signwa) in [PR 3953](https://github.com/gradio-app/gradio/pull/3953)\n- Move Google Tag Manager related initialization code to analytics-enabled block by [@akx](https://github.com/akx) in [PR 3956](https://github.com/gradio-app/gradio/pull/3956)\n- Fix bug where port was not reused if the demo was closed and then re-launched by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3896](https://github.com/gradio-app/gradio/pull/3959)\n- Fixes issue where dropdown does not position itself at selected element when opened [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3639](https://github.com/gradio-app/gradio/pull/3639)\n\n### Documentation Changes:\n\n- Make use of `gr` consistent across the docs by [@duerrsimon](https://github.com/duerrsimon) in [PR 3901](https://github.com/gradio-app/gradio/pull/3901)\n- Fixed typo in theming-guide.md by [@eltociear](https://github.com/eltociear) in [PR 3952](https://github.com/gradio-app/gradio/pull/3952)\n\n### Testing and Infrastructure Changes:\n\n- CI: Python backend lint is only run once, by [@akx](https://github.com/akx) in [PR 3960](https://github.com/gradio-app/gradio/pull/3960)\n- Format invocations and concatenations were replaced by f-strings where possible by [@akx](https://github.com/akx) in [PR 3984](https://github.com/gradio-app/gradio/pull/3984)\n- Linting rules were made more strict and issues fixed by [@akx](https://github.com/akx) in [PR 3979](https://github.com/gradio-app/gradio/pull/3979).\n\n### Breaking Changes:\n\n- Some re-exports in `gradio.themes` utilities (introduced in 3.24.0) have been eradicated.\n By [@akx](https://github.com/akx) in [PR 3958](https://github.com/gradio-app/gradio/pull/3958)\n\n### Full Changelog:\n\n- Add DESCRIPTION.md to image_segmentation demo by [@aliabd](https://github.com/aliabd) in [PR 3866](https://github.com/gradio-app/gradio/pull/3866)\n- Fix error in running `gr.themes.builder()` by [@deepkyu](https://github.com/deepkyu) in [PR 3869](https://github.com/gradio-app/gradio/pull/3869)\n- Fixed a JavaScript TypeError when loading custom JS with `_js` and setting `outputs` to `None` in `gradio.Blocks()` by [@DavG25](https://github.com/DavG25) in [PR 3883](https://github.com/gradio-app/gradio/pull/3883)\n- Fixed 
bg_background_fill theme property to expand to whole background, block_radius to affect form elements as well, and added block_label_shadow theme property by [@aliabid94](https://github.com/aliabid94) in [PR 3590](https://github.com/gradio-app/gradio/pull/3590)\n\n#\n\n## 3.27.0\n\n### New Features:\n\n###### AnnotatedImage Component\n\nNew AnnotatedImage component allows users to highlight regions of an image, either by providing bounding boxes, or 0-1 pixel masks. This component is useful for tasks such as image segmentation, object detection, and image captioning.\n\n![AnnotatedImage screenshot](https://user-images.githubusercontent.com/7870876/232142720-86e0020f-beaf-47b9-a843-689c9621f09c.gif)\n\nExample usage:\n\n```python\nwith gr.Blocks() as demo:\n img = gr.Image()\n img_section = gr.AnnotatedImage()\n def mask(img):\n top_left_corner = [0, 0, 20, 20]\n random_mask = np.random.randint(0, 2, img.shape[:2])\n return (img, [(top_left_corner, \"left corner\"), (random_mask, \"random\")])\n img.change(mask, img, img_section)\n```\n\nSee the [image_segmentation demo](https://github.com/gradio-app/gradio/tree/main/demo/image_segmentation) for a full example. By [@aliabid94](https://github.com/aliabid94) in [PR 3836](https://github.com/gradio-app/gradio/pull/3836)\n\n#\n\n#\n\n#\n\n#\n\n#\n\n#\n\n## 3.26.0\n\n### New Features:\n\n###### `Video` component supports subtitles\n\n- Allow the video component to accept subtitles as input, by [@tomchang25](https://github.com/tomchang25) in [PR 3673](https://github.com/gradio-app/gradio/pull/3673). To provide subtitles, simply return a tuple consisting of `(path_to_video, path_to_subtitles)` from your function. Both `.srt` and `.vtt` formats are supported:\n\n```py\nwith gr.Blocks() as demo:\n gr.Video((\"video.mp4\", \"captions.srt\"))\n```\n\n### Bug Fixes:\n\n- Fix code markdown support in `gr.Chatbot()` component by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3816](https://github.com/gradio-app/gradio/pull/3816)\n\n### Documentation Changes:\n\n- Updates the \"view API\" page in Gradio apps to use the `gradio_client` library by [@aliabd](https://github.com/aliabd) in [PR 3765](https://github.com/gradio-app/gradio/pull/3765)\n\n- Read more about how to use the `gradio_client` library here: https://gradio.app/getting-started-with-the-python-client/\n\n#\n\n#\n\n#\n\n#\n\n## 3.25.0\n\n### New Features:\n\n- Improve error messages when number of inputs/outputs to event handlers mismatch, by [@space-nuko](https://github.com/space-nuko) in [PR 3519](https://github.com/gradio-app/gradio/pull/3519)\n\n- Add `select` listener to Images, allowing users to click on any part of an image and get the coordinates of the click by [@aliabid94](https://github.com/aliabid94) in [PR 3786](https://github.com/gradio-app/gradio/pull/3786).\n\n```python\nwith gr.Blocks() as demo:\n img = gr.Image()\n textbox = gr.Textbox()\n\n def select_handler(img, evt: gr.SelectData):\n selected_pixel = img[evt.index[1], evt.index[0]]\n return f\"Selected pixel: {selected_pixel}\"\n\n img.select(select_handler, img, textbox)\n```\n\n![Recording 2023-04-08 at 17 44 39](https://user-images.githubusercontent.com/7870876/230748572-90a2a8d5-116d-4769-bb53-5516555fbd0f.gif)\n\n### Bug Fixes:\n\n- Increase timeout for sending analytics data by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3647](https://github.com/gradio-app/gradio/pull/3647)\n- Fix bug where http token was not accessed over websocket connections by 
[@freddyaboulton](https://github.com/freddyaboulton) in [PR 3735](https://github.com/gradio-app/gradio/pull/3735)\n- Add ability to specify `rows`, `columns` and `object-fit` in `style()` for `gr.Gallery()` component by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3586](https://github.com/gradio-app/gradio/pull/3586)\n- Fix bug where recording an audio file through the microphone resulted in a corrupted file name by [@abidlabs](https://github.com/abidlabs) in [PR 3770](https://github.com/gradio-app/gradio/pull/3770)\n- Added \"ssl_verify\" to blocks.launch method to allow for use of self-signed certs by [@garrettsutula](https://github.com/garrettsutula) in [PR 3873](https://github.com/gradio-app/gradio/pull/3873)\n- Fix bug where iterators were not being reset for processes that terminated early by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3777](https://github.com/gradio-app/gradio/pull/3777)\n- Fix bug where the upload button was not properly handling the `file_count='multiple'` case by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3782](https://github.com/gradio-app/gradio/pull/3782)\n- Fix bug where the \"Use via API\" button was giving an error by [@Devang-C](https://github.com/Devang-C) in [PR 3783](https://github.com/gradio-app/gradio/pull/3783)\n\n### Documentation Changes:\n\n- Fix invalid argument docstrings, by [@akx](https://github.com/akx) in [PR 3740](https://github.com/gradio-app/gradio/pull/3740)\n\n#\n\n#\n\n### Full Changelog:\n\n- Fixed IPv6 listening to work with bracket [::1] notation, by [@dsully](https://github.com/dsully) in [PR 3695](https://github.com/gradio-app/gradio/pull/3695)\n\n#\n\n## 3.24.1\n\n### New Features:\n\n- No changes to highlight.\n\n### Bug Fixes:\n\n- Fixes Chatbot issue where new lines were being created every time a message was sent back and forth by [@aliabid94](https://github.com/aliabid94) in [PR 3717](https://github.com/gradio-app/gradio/pull/3717).\n- Fixes data updating in DataFrame invoking a `select` event once the dataframe has been selected. By [@yiyuezhuo](https://github.com/yiyuezhuo) in [PR 3861](https://github.com/gradio-app/gradio/pull/3861)\n- Fixes false positive warning which is due to too strict type checking by [@yiyuezhuo](https://github.com/yiyuezhuo) in [PR 3837](https://github.com/gradio-app/gradio/pull/3837).\n\n#\n\n#\n\n#\n\n#\n\n#\n\n## 3.24.0\n\n### New Features:\n\n- Trigger the release event when Slider number input is released or unfocused by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3589](https://github.com/gradio-app/gradio/pull/3589)\n- Created Theme Builder, which allows users to create themes without writing any code, by [@aliabid94](https://github.com/aliabid94) in [PR 3664](https://github.com/gradio-app/gradio/pull/3664). Launch by:\n\n ```python\n import gradio as gr\n gr.themes.builder()\n ```\n\n ![Theme Builder](https://user-images.githubusercontent.com/7870876/228204929-d71cbba5-69c2-45b3-bd20-e3a201d98b12.png)\n\n- The `Dropdown` component now has an `allow_custom_value` parameter that lets users type in custom values not in the original list of choices (see the sketch below).\n- The `Colorpicker` component now has a `.blur()` event
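\n\nFor instance, a minimal sketch of the new parameter (the choices, label, and surrounding layout here are only illustrative):\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n    # with allow_custom_value=True, a value typed by the user is kept\n    # even if it is not one of the predefined choices\n    color = gr.Dropdown([\"red\", \"green\", \"blue\"], label=\"Color\", allow_custom_value=True)\n\ndemo.launch()\n```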
\n\n###### Added a download button for videos! \ud83d\udce5\n\n![download_video](https://user-images.githubusercontent.com/41651716/227009612-9bc5fb72-2a44-4c55-9b7b-a0fa098e7f25.gif)\n\nBy [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3581](https://github.com/gradio-app/gradio/pull/3581).\n\n### Bug Fixes:\n\n- Fixed bug where text for altair plots was not legible in dark mode by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3555](https://github.com/gradio-app/gradio/pull/3555)\n- Fixes `Chatbot` and `Image` components so that files passed during processing are added to a directory where they can be served from, by [@abidlabs](https://github.com/abidlabs) in [PR 3523](https://github.com/gradio-app/gradio/pull/3523)\n- Use Gradio API server to send telemetry using `huggingface_hub` by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3488](https://github.com/gradio-app/gradio/pull/3488)\n- Fixes an issue where if the Blocks scope was not exited, then State could be shared across sessions, by [@abidlabs](https://github.com/abidlabs) in [PR 3600](https://github.com/gradio-app/gradio/pull/3600)\n- Ensures that `gr.load()` loads and applies the upstream theme, by [@abidlabs](https://github.com/abidlabs) in [PR 3641](https://github.com/gradio-app/gradio/pull/3641)\n- Fixed bug where \"or\" was not being localized in file upload text by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3599](https://github.com/gradio-app/gradio/pull/3599)\n- Fixed bug where chatbot does not autoscroll inside of a tab, row or column by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3637](https://github.com/gradio-app/gradio/pull/3637)\n- Fixed bug where textbox shrinks when `lines` set to larger than 20 by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3637](https://github.com/gradio-app/gradio/pull/3637)\n- Ensure CSS has fully loaded before rendering the application, by [@pngwn](https://github.com/pngwn) in [PR 3573](https://github.com/gradio-app/gradio/pull/3573)\n- Support using an empty list as `gr.Dataframe` value, by [@space-nuko](https://github.com/space-nuko) in [PR 3646](https://github.com/gradio-app/gradio/pull/3646)\n- Fixed `gr.Image` not filling the entire element size, by [@space-nuko](https://github.com/space-nuko) in [PR 3649](https://github.com/gradio-app/gradio/pull/3649)\n- Make `gr.Code` support the `lines` property, by [@space-nuko](https://github.com/space-nuko) in [PR 3651](https://github.com/gradio-app/gradio/pull/3651)\n- Fixes certain `_js` return values being double wrapped in an array, by [@space-nuko](https://github.com/space-nuko) in [PR 3594](https://github.com/gradio-app/gradio/pull/3594)\n- Correct the documentation of `gr.File` component to state that its preprocessing method converts the uploaded file to a temporary file, by @RussellLuo in [PR 3660](https://github.com/gradio-app/gradio/pull/3660)\n- Fixed bug in Serializer ValueError text by [@osanseviero](https://github.com/osanseviero) in [PR 3669](https://github.com/gradio-app/gradio/pull/3669)\n- Fix default parameter argument and `gr.Progress` used in same function, by [@space-nuko](https://github.com/space-nuko) in [PR 3671](https://github.com/gradio-app/gradio/pull/3671)\n- Hide `Remove All` button in `gr.Dropdown` single-select mode by [@space-nuko](https://github.com/space-nuko) in [PR 
3678](https://github.com/gradio-app/gradio/pull/3678)\n- Fix broken spaces in docs by [@aliabd](https://github.com/aliabd) in [PR 3698](https://github.com/gradio-app/gradio/pull/3698)\n- Fix items in `gr.Dropdown` besides the selected item receiving a checkmark, by [@space-nuko](https://github.com/space-nuko) in [PR 3644](https://github.com/gradio-app/gradio/pull/3644)\n- Fix several `gr.Dropdown` issues and improve usability, by [@space-nuko](https://github.com/space-nuko) in [PR 3705](https://github.com/gradio-app/gradio/pull/3705)\n\n### Documentation Changes:\n\n- Makes some fixes to the Theme Guide related to naming of variables, by [@abidlabs](https://github.com/abidlabs) in [PR 3561](https://github.com/gradio-app/gradio/pull/3561)\n- Documented `HuggingFaceDatasetJSONSaver` by [@osanseviero](https://github.com/osanseviero) in [PR 3604](https://github.com/gradio-app/gradio/pull/3604)\n- Makes some additions to documentation of `Audio` and `State` components, and fixes the `pictionary` demo by [@abidlabs](https://github.com/abidlabs) in [PR 3611](https://github.com/gradio-app/gradio/pull/3611)\n- Fix outdated sharing your app guide by [@aliabd](https://github.com/aliabd) in [PR 3699](https://github.com/gradio-app/gradio/pull/3699)\n\n### Testing and Infrastructure Changes:\n\n- Removed heavily-mocked tests related to comet_ml, wandb, and mlflow as they added a significant amount of test dependencies that prevented installation of test dependencies on Windows environments. By [@abidlabs](https://github.com/abidlabs) in [PR 3608](https://github.com/gradio-app/gradio/pull/3608)\n- Added Windows continuous integration, by [@space-nuko](https://github.com/space-nuko) in [PR 3628](https://github.com/gradio-app/gradio/pull/3628)\n- Switched linting from flake8 + isort to `ruff`, by [@akx](https://github.com/akx) in [PR 3710](https://github.com/gradio-app/gradio/pull/3710)\n\n#\n\n### Full Changelog:\n\n- Mobile responsive iframes in themes guide by [@aliabd](https://github.com/aliabd) in [PR 3562](https://github.com/gradio-app/gradio/pull/3562)\n- Remove extra $demo from theme guide by [@aliabd](https://github.com/aliabd) in [PR 3563](https://github.com/gradio-app/gradio/pull/3563)\n- Set the theme name to be the upstream repo name when loading from the hub by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3595](https://github.com/gradio-app/gradio/pull/3595)\n- Copy everything in website Dockerfile, fix build issues by [@aliabd](https://github.com/aliabd) in [PR 3659](https://github.com/gradio-app/gradio/pull/3659)\n- Raise error when an event is queued but the queue is not configured by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3640](https://github.com/gradio-app/gradio/pull/3640)\n- Allows users to apss in a string name for a built-in theme, by [@abidlabs](https://github.com/abidlabs) in [PR 3641](https://github.com/gradio-app/gradio/pull/3641)\n- Added `orig_name` to Video output in the backend so that the front end can set the right name for downloaded video files by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3700](https://github.com/gradio-app/gradio/pull/3700)\n\n#\n\n## 3.23.0\n\n### New Features:\n\n###### Theme Sharing!\n\nOnce you have created a theme, you can upload it to the HuggingFace Hub to let others view it, use it, and build off of it! You can also download, reuse, and remix other peoples' themes. 
See https://gradio.app/theming-guide/ for more details.\n\nBy [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3428](https://github.com/gradio-app/gradio/pull/3428)\n\n### Bug Fixes:\n\n- Removes leading spaces from all lines of code uniformly in the `gr.Code()` component. By [@abidlabs](https://github.com/abidlabs) in [PR 3556](https://github.com/gradio-app/gradio/pull/3556)\n- Fixed broken login page, by [@aliabid94](https://github.com/aliabid94) in [PR 3529](https://github.com/gradio-app/gradio/pull/3529)\n\n#\n\n#\n\n#\n\n### Full Changelog:\n\n- Fix rendering of dropdowns to take more space, and related bugs, by [@aliabid94](https://github.com/aliabid94) in [PR 3549](https://github.com/gradio-app/gradio/pull/3549)\n\n#\n\n## 3.22.1\n\n#\n\n### Bug Fixes:\n\n- Restore label bars by [@aliabid94](https://github.com/aliabid94) in [PR 3507](https://github.com/gradio-app/gradio/pull/3507)\n\n#\n\n#\n\n#\n\n#\n\n#\n\n## 3.22.0\n\n### New Features:\n\n###### Official Theme release\n\nGradio now supports a new theme system, which allows you to customize the look and feel of your app. You can now use the `theme=` kwarg to pass in a prebuilt theme, or customize your own! See https://gradio.app/theming-guide/ for more details. By [@aliabid94](https://github.com/aliabid94) in [PR 3470](https://github.com/gradio-app/gradio/pull/3470) and [PR 3497](https://github.com/gradio-app/gradio/pull/3497)\n\n###### `elem_classes`\n\nAdd keyword argument `elem_classes` to Components to control class names of components, in the same manner as existing `elem_id`.\nBy [@aliabid94](https://github.com/aliabid94) in [PR 3466](https://github.com/gradio-app/gradio/pull/3466)\n\n### Bug Fixes:\n\n- Fixes the File.upload() event trigger which broke as part of the change in how we uploaded files by [@abidlabs](https://github.com/abidlabs) in [PR 3462](https://github.com/gradio-app/gradio/pull/3462)\n- Fixed issue with `gr.Request` object failing to handle dictionaries when nested keys couldn't be converted to variable names [#3454](https://github.com/gradio-app/gradio/issues/3454) by [@radames](https://github.com/radames) in [PR 3459](https://github.com/gradio-app/gradio/pull/3459)\n- Fixed bug where css and client api was not working properly when mounted in a subpath by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3482](https://github.com/gradio-app/gradio/pull/3482)\n\n### Documentation Changes:\n\n- Document gr.Error in the docs by [@aliabd](https://github.com/aliabd) in [PR 3465](https://github.com/gradio-app/gradio/pull/3465)\n\n### Testing and Infrastructure Changes:\n\n- Pinned `pyright==1.1.298` for stability by [@abidlabs](https://github.com/abidlabs) in [PR 3475](https://github.com/gradio-app/gradio/pull/3475)\n- Removed `IOComponent.add_interactive_to_config()` by [@space-nuko](https://github.com/space-nuko) in [PR 3476](https://github.com/gradio-app/gradio/pull/3476)\n- Removed `IOComponent.generate_sample()` by [@space-nuko](https://github.com/space-nuko) in [PR 3475](https://github.com/gradio-app/gradio/pull/3483)\n\n#\n\n### Full Changelog:\n\n- Revert primary button background color in dark mode by [@aliabid94](https://github.com/aliabid94) in [PR 3468](https://github.com/gradio-app/gradio/pull/3468)\n\n#\n\n## 3.21.0\n\n### New Features:\n\n###### Theme Sharing \ud83c\udfa8 \ud83e\udd1d\n\nYou can now share your gradio themes with the world!\n\nAfter creating a theme, you can upload it to the HuggingFace Hub to let others view it, use it, and build off of it!\n\n###### 
Uploading\n\nThere are two ways to upload a theme, via the theme class instance or the command line.\n\n1. Via the class instance\n\n```python\nmy_theme.push_to_hub(repo_name=\"my_theme\",\n version=\"0.2.0\",\n hf_token=\"...\")\n```\n\n2. Via the command line\n\nFirst save the theme to disk\n\n```python\nmy_theme.dump(filename=\"my_theme.json\")\n```\n\nThen use the `upload_theme` command:\n\n```bash\nupload_theme\\\n\"my_theme.json\"\\\n\"my_theme\"\\\n\"0.2.0\"\\\n\"\"\n```\n\nThe `version` must be a valid [semantic version](https://www.geeksforgeeks.org/introduction-semantic-versioning/) string.\n\nThis creates a space on the huggingface hub to host the theme files and show potential users a preview of your theme.\n\nAn example theme space is here: https://huggingface.co/spaces/freddyaboulton/dracula_revamped\n\n###### Downloading\n\nTo use a theme from the hub, use the `from_hub` method on the `ThemeClass` and pass it to your app:\n\n```python\nmy_theme = gr.Theme.from_hub(\"freddyaboulton/my_theme\")\n\nwith gr.Blocks(theme=my_theme) as demo:\n ....\n```\n\nYou can also pass the theme string directly to `Blocks` or `Interface` (`gr.Blocks(theme=\"freddyaboulton/my_theme\")`)\n\nYou can pin your app to an upstream theme version by using semantic versioning expressions.\n\nFor example, the following would ensure the theme we load from the `my_theme` repo was between versions `0.1.0` and `0.2.0`:\n\n```python\nwith gr.Blocks(theme=\"freddyaboulton/my_theme@>=0.1.0,<0.2.0\") as demo:\n ....\n```\n\nby [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3428](https://github.com/gradio-app/gradio/pull/3428)\n\n###### Code component \ud83e\uddbe\n\nNew code component allows you to enter, edit and display code with full syntax highlighting by [@pngwn](https://github.com/pngwn) in [PR 3421](https://github.com/gradio-app/gradio/pull/3421)\n\n###### The `Chatbot` component now supports audio, video, and images\n\nThe `Chatbot` component now supports audio, video, and images with a simple syntax: simply\npass in a tuple with the URL or filepath (the second optional element of the tuple is alt text), and the image/audio/video will be displayed:\n\n```python\ngr.Chatbot([\n ((\"driving.mp4\",), \"cool video\"),\n ((\"cantina.wav\",), \"cool audio\"),\n ((\"lion.jpg\", \"A lion\"), \"cool pic\"),\n]).style(height=800)\n```\n\n\"image\"\n\nNote: images were previously supported via Markdown syntax and that is still supported for backwards compatibility. By [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3413](https://github.com/gradio-app/gradio/pull/3413)\n\n- Allow consecutive function triggers with `.then` and `.success` by [@aliabid94](https://github.com/aliabid94) in [PR 3430](https://github.com/gradio-app/gradio/pull/3430)\n\n- New code component allows you to enter, edit and display code with full syntax highlighting by [@pngwn](https://github.com/pngwn) in [PR 3421](https://github.com/gradio-app/gradio/pull/3421)\n\n![](https://user-images.githubusercontent.com/12937446/224116643-5cfb94b3-93ce-43ee-bb7b-c25c3b66e0a1.png)\n\n- Added the `.select()` event listener, which also includes event data that can be passed as an argument to a function with type hint `gr.SelectData`. The following components support the `.select()` event listener: Chatbot, CheckboxGroup, Dataframe, Dropdown, File, Gallery, HighlightedText, Label, Radio, TabItem, Tab, Textbox. 
Example usage:\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n gallery = gr.Gallery([\"images/1.jpg\", \"images/2.jpg\", \"images/3.jpg\"])\n selected_index = gr.Textbox()\n\n def on_select(evt: gr.SelectData):\n return evt.index\n\n gallery.select(on_select, None, selected_index)\n```\n\nBy [@aliabid94](https://github.com/aliabid94) in [PR 3399](https://github.com/gradio-app/gradio/pull/3399)\n\n- The `Textbox` component now includes a copy button by [@abidlabs](https://github.com/abidlabs) in [PR 3452](https://github.com/gradio-app/gradio/pull/3452)\n\n### Bug Fixes:\n\n- Use `huggingface_hub` to send telemetry on `interface` and `blocks`; eventually to replace segment by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3342](https://github.com/gradio-app/gradio/pull/3342)\n- Ensure load events created by components (randomize for slider, callable values) are never queued unless every is passed by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3391](https://github.com/gradio-app/gradio/pull/3391)\n- Prevent in-place updates of `generic_update` by shallow copying by [@gitgithan](https://github.com/gitgithan) in [PR 3405](https://github.com/gradio-app/gradio/pull/3405) to fix [#3282](https://github.com/gradio-app/gradio/issues/3282)\n- Fix bug caused by not importing `BlockContext` in `utils.py` by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3424](https://github.com/gradio-app/gradio/pull/3424)\n- Ensure dropdown does not highlight partial matches by [@pngwn](https://github.com/pngwn) in [PR 3421](https://github.com/gradio-app/gradio/pull/3421)\n- Fix mic button display by [@aliabid94](https://github.com/aliabid94) in [PR 3456](https://github.com/gradio-app/gradio/pull/3456)\n\n### Documentation Changes:\n\n- Added a section on security and access when sharing Gradio apps by [@abidlabs](https://github.com/abidlabs) in [PR 3408](https://github.com/gradio-app/gradio/pull/3408)\n- Add Chinese README by [@uanu2002](https://github.com/uanu2002) in [PR 3394](https://github.com/gradio-app/gradio/pull/3394)\n- Adds documentation for web components by [@abidlabs](https://github.com/abidlabs) in [PR 3407](https://github.com/gradio-app/gradio/pull/3407)\n- Fixed link in Chinese readme by [@eltociear](https://github.com/eltociear) in [PR 3417](https://github.com/gradio-app/gradio/pull/3417)\n- Document Blocks methods by [@aliabd](https://github.com/aliabd) in [PR 3427](https://github.com/gradio-app/gradio/pull/3427)\n- Fixed bug where event handlers were not showing up in documentation by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3434](https://github.com/gradio-app/gradio/pull/3434)\n\n### Testing and Infrastructure Changes:\n\n- Fixes tests that were failing locally but passing on CI by [@abidlabs](https://github.com/abidlabs) in [PR 3411](https://github.com/gradio-app/gradio/pull/3411)\n- Remove codecov from the repo by [@aliabd](https://github.com/aliabd) in [PR 3415](https://github.com/gradio-app/gradio/pull/3415)\n\n#\n\n### Full Changelog:\n\n- Prevent in-place updates of `generic_update` by shallow copying by [@gitgithan](https://github.com/gitgithan) in [PR 3405](https://github.com/gradio-app/gradio/pull/3405) to fix [#3282](https://github.com/gradio-app/gradio/issues/3282)\n- Persist file names of files uploaded through any Gradio component by [@abidlabs](https://github.com/abidlabs) in [PR 3412](https://github.com/gradio-app/gradio/pull/3412)\n- Fix markdown embedded component in docs by 
[@aliabd](https://github.com/aliabd) in [PR 3410](https://github.com/gradio-app/gradio/pull/3410)\n- Clean up event listeners code by [@aliabid94](https://github.com/aliabid94) in [PR 3420](https://github.com/gradio-app/gradio/pull/3420)\n- Fix css issue with spaces logo by [@aliabd](https://github.com/aliabd) in [PR 3422](https://github.com/gradio-app/gradio/pull/3422)\n- Makes a few fixes to the `JSON` component (show_label parameter, icons) in [@abidlabs](https://github.com/abidlabs) in [PR 3451](https://github.com/gradio-app/gradio/pull/3451)\n\n#\n\n## 3.20.1\n\n### New Features:\n\n- Add `height` kwarg to style in `gr.Chatbot()` component by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3369](https://github.com/gradio-app/gradio/pull/3369)\n\n```python\nchatbot = gr.Chatbot().style(height=500)\n```\n\n### Bug Fixes:\n\n- Ensure uploaded images are always shown in the sketch tool by [@pngwn](https://github.com/pngwn) in [PR 3386](https://github.com/gradio-app/gradio/pull/3386)\n- Fixes bug where when if fn is a non-static class member, then self should be ignored as the first param of the fn by [@or25](https://github.com/or25) in [PR #3227](https://github.com/gradio-app/gradio/pull/3227)\n\n#\n\n#\n\n#\n\n#\n\n#\n\n## 3.20.0\n\n### New Features:\n\n###### Release event for Slider\n\nNow you can trigger your python function to run when the slider is released as opposed to every slider change value!\n\nSimply use the `release` method on the slider\n\n```python\nslider.release(function, inputs=[...], outputs=[...], api_name=\"predict\")\n```\n\nBy [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3353](https://github.com/gradio-app/gradio/pull/3353)\n\n###### Dropdown Component Updates\n\nThe standard dropdown component now supports searching for choices. 
Also when `multiselect` is `True`, you can specify `max_choices` to set the maximum number of choices you want the user to be able to select from the dropdown component.\n\n```python\ngr.Dropdown(label=\"Choose your favorite colors\", choices=[\"red\", \"blue\", \"green\", \"yellow\", \"orange\"], multiselect=True, max_choices=2)\n```\n\nby [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3211](https://github.com/gradio-app/gradio/pull/3211)\n\n###### Download button for images \ud83d\uddbc\ufe0f\n\nOutput images will now automatically have a download button displayed to make it easier to save and share\nthe results of Machine Learning art models.\n\n![download_sketch](https://user-images.githubusercontent.com/41651716/221025113-e693bf41-eabd-42b3-a4f2-26f2708d98fe.gif)\n\nBy [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3297](https://github.com/gradio-app/gradio/pull/3297)\n\n- Updated image upload component to accept all image formats, including lossless formats like .webp by [@fienestar](https://github.com/fienestar) in [PR 3225](https://github.com/gradio-app/gradio/pull/3225)\n- Adds a disabled mode to the `gr.Button` component by setting `interactive=False` by [@abidlabs](https://github.com/abidlabs) in [PR 3266](https://github.com/gradio-app/gradio/pull/3266) and [PR 3288](https://github.com/gradio-app/gradio/pull/3288)\n- Adds visual feedback to the user when the Flag button is clicked, by [@abidlabs](https://github.com/abidlabs) in [PR 3289](https://github.com/gradio-app/gradio/pull/3289)\n- Adds ability to set `flagging_options` display text and saved flag separately by [@abidlabs](https://github.com/abidlabs) in [PR 3289](https://github.com/gradio-app/gradio/pull/3289)\n- Allow the setting of `brush_radius` for the `Image` component both as a default and via `Image.update()` by [@pngwn](https://github.com/pngwn) in [PR 3277](https://github.com/gradio-app/gradio/pull/3277)\n- Added `info=` argument to form components to enable extra context provided to users, by [@aliabid94](https://github.com/aliabid94) in [PR 3291](https://github.com/gradio-app/gradio/pull/3291)\n- Allow developers to access the username of a logged-in user from the `gr.Request()` object using the `.username` attribute by [@abidlabs](https://github.com/abidlabs) in [PR 3296](https://github.com/gradio-app/gradio/pull/3296) (see the sketch after this list)\n- Add `preview` option to `Gallery.style` that launches the gallery in preview mode when first loaded by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3345](https://github.com/gradio-app/gradio/pull/3345)
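\n\nA minimal sketch combining two of the additions above, the `info=` argument and the `gr.Request().username` attribute (the credentials, labels, and handler here are placeholders, not part of the release):\n\n```python\nimport gradio as gr\n\ndef greet(name, request: gr.Request):\n    # request.username is only populated when the app is launched with auth\n    user = request.username or \"anonymous\"\n    return f\"Hello {name}! You are logged in as {user}.\"\n\nwith gr.Blocks() as demo:\n    name = gr.Textbox(label=\"Name\", info=\"Who should be greeted?\")\n    greeting = gr.Textbox(label=\"Greeting\")\n    gr.Button(\"Greet\").click(greet, name, greeting)\n\ndemo.launch(auth=(\"admin\", \"admin\"))\n```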
\n\n### Bug Fixes:\n\n- Ensure `mirror_webcam` is always respected by [@pngwn](https://github.com/pngwn) in [PR 3245](https://github.com/gradio-app/gradio/pull/3245)\n- Fix issue where updated markdown links were not being opened in a new tab by [@gante](https://github.com/gante) in [PR 3236](https://github.com/gradio-app/gradio/pull/3236)\n- API Docs Fixes by [@aliabd](https://github.com/aliabd) in [PR 3287](https://github.com/gradio-app/gradio/pull/3287)\n- Added a timeout to queue messages as some demos were experiencing infinitely growing queues from active jobs waiting forever for clients to respond by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3196](https://github.com/gradio-app/gradio/pull/3196)\n- Fixes the height of rendered LaTeX images so that they match the height of surrounding text by [@abidlabs](https://github.com/abidlabs) in [PR 3258](https://github.com/gradio-app/gradio/pull/3258) and in [PR 3276](https://github.com/gradio-app/gradio/pull/3276)\n- Fix bug where matplotlib images were always too small on the front end by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3274](https://github.com/gradio-app/gradio/pull/3274)\n- Remove embed's `initial_height` when loading is complete so the embed finds its natural height once it is loaded by [@pngwn](https://github.com/pngwn) in [PR 3292](https://github.com/gradio-app/gradio/pull/3292)\n- Prevent Sketch from crashing when a default image is provided by [@pngwn](https://github.com/pngwn) in [PR 3277](https://github.com/gradio-app/gradio/pull/3277)\n- Respect the `shape` argument on the front end when creating Image Sketches by [@pngwn](https://github.com/pngwn) in [PR 3277](https://github.com/gradio-app/gradio/pull/3277)\n- Fix infinite loop caused by setting a `Dropdown`'s value to be `[]` and adding a change event on the dropdown by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3295](https://github.com/gradio-app/gradio/pull/3295)\n- Fix change event listed twice in image docs by [@aliabd](https://github.com/aliabd) in [PR 3318](https://github.com/gradio-app/gradio/pull/3318)\n- Fix bug that caused the UI to be vertically centered at all times by [@pngwn](https://github.com/pngwn) in [PR 3336](https://github.com/gradio-app/gradio/pull/3336)\n- Fix bug where `height` set in `Gallery.style` was not respected by the front-end by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3343](https://github.com/gradio-app/gradio/pull/3343)\n- Ensure markdown lists are rendered correctly by [@pngwn](https://github.com/pngwn) in [PR 3341](https://github.com/gradio-app/gradio/pull/3341)\n- Ensure that the initial empty value for `gr.Dropdown(multiselect=True)` is an empty list and the initial value for `gr.Dropdown(multiselect=False)` is an empty string by [@pngwn](https://github.com/pngwn) in [PR 3338](https://github.com/gradio-app/gradio/pull/3338)\n- Ensure uploaded images respect the shape property when the canvas is also enabled by [@pngwn](https://github.com/pngwn) in [PR 3351](https://github.com/gradio-app/gradio/pull/3351)\n- Ensure that Google Analytics works correctly when gradio apps are created with `analytics_enabled=True` by [@abidlabs](https://github.com/abidlabs) in [PR 3349](https://github.com/gradio-app/gradio/pull/3349)\n- Fix bug where files were being re-uploaded after updates by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3375](https://github.com/gradio-app/gradio/pull/3375)\n- Fix error when using a backend fn and custom js at the same time by [@jialeicui](https://github.com/jialeicui) in [PR 3358](https://github.com/gradio-app/gradio/pull/3358)\n- Support new embeds for Hugging Face Spaces subdomains by [@pngwn](https://github.com/pngwn) in [PR 3367](https://github.com/gradio-app/gradio/pull/3367)\n\n### Documentation Changes:\n\n- Added the `types` field to the dependency field in the config by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3315](https://github.com/gradio-app/gradio/pull/3315)\n- Gradio Status Page by [@aliabd](https://github.com/aliabd) in [PR 3331](https://github.com/gradio-app/gradio/pull/3331)\n- Adds a Guide on setting up a dashboard from Supabase data using the `gr.BarPlot`\n component by [@abidlabs](https://github.com/abidlabs) in [PR 3275](https://github.com/gradio-app/gradio/pull/3275)\n\n### Testing and Infrastructure Changes:\n\n- Adds a script to benchmark the performance of the queue and adds some instructions on how to use it. 
By [@freddyaboulton](https://github.com/freddyaboulton) and [@abidlabs](https://github.com/abidlabs) in [PR 3272](https://github.com/gradio-app/gradio/pull/3272)\n- Flaky python tests no longer cancel non-flaky tests by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3344](https://github.com/gradio-app/gradio/pull/3344)\n\n### Breaking Changes:\n\n- Chatbot bubble colors can no longer be set by `chatbot.style(color_map=)` by [@aliabid94] in [PR 3370](https://github.com/gradio-app/gradio/pull/3370)\n\n### Full Changelog:\n\n- Fixed comment typo in components.py by [@eltociear](https://github.com/eltociear) in [PR 3235](https://github.com/gradio-app/gradio/pull/3235)\n- Cleaned up chatbot ui look and feel by [@aliabid94] in [PR 3370](https://github.com/gradio-app/gradio/pull/3370)\n\n#\n\n## 3.19.1\n\n#\n\n### Bug Fixes:\n\n- UI fixes including footer and API docs by [@aliabid94](https://github.com/aliabid94) in [PR 3242](https://github.com/gradio-app/gradio/pull/3242)\n- Updated image upload component to accept all image formats, including lossless formats like .webp by [@fienestar](https://github.com/fienestar) in [PR 3225](https://github.com/gradio-app/gradio/pull/3225)\n\n#\n\n#\n\n#\n\n### Full Changelog:\n\n- Added backend support for themes by [@aliabid94](https://github.com/aliabid94) in [PR 2931](https://github.com/gradio-app/gradio/pull/2931)\n- Added support for button sizes \"lg\" (default) and \"sm\".\n\n#\n\n## 3.19.0\n\n### New Features:\n\n###### Improved embedding experience\n\nWhen embedding a spaces-hosted gradio app as a web component, you now get an improved UI linking back to the original space, better error handling and more intelligent load performance. No changes are required to your code to benefit from this enhanced experience; simply upgrade your gradio SDK to the latest version.\n\n![](https://user-images.githubusercontent.com/12937446/219653294-86937632-72c1-4e93-a77c-af705d49382a.png)\n\nThis behaviour is configurable. You can disable the info panel at the bottom by passing `info=\"false\"`. You can disable the container entirely by passing `container=\"false\"`.\n\nError statuses are reported in the UI with an easy way for end-users to report problems to the original space author via the community tab of that Hugginface space:\n\n![](https://user-images.githubusercontent.com/12937446/219655499-88019443-d694-44e7-9e6d-242e19d10a5c.png)\n\nBy default, gradio apps are lazy loaded, vastly improving performance when there are several demos on the page. Metadata is loaded ahead of time, but the space will only be loaded and rendered when it is in view.\n\nThis behaviour is configurable. You can pass `eager=\"true\"` to load and render the space regardless of whether or not it is currently on the screen.\n\nby [@pngwn](https://github.com/pngwn) in [PR 3205](https://github.com/gradio-app/gradio/pull/3205)\n\n###### New `gr.BarPlot` component! 
\ud83d\udcca\n\nCreate interactive bar plots from a high-level interface with `gr.BarPlot`.\nNo need to remember matplotlib syntax anymore!\n\nExample usage:\n\n```python\nimport gradio as gr\nimport pandas as pd\n\nsimple = pd.DataFrame({\n 'a': ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I'],\n 'b': [28, 55, 43, 91, 81, 53, 19, 87, 52]\n})\n\nwith gr.Blocks() as demo:\n gr.BarPlot(\n simple,\n x=\"a\",\n y=\"b\",\n title=\"Simple Bar Plot with made up data\",\n tooltip=['a', 'b'],\n )\n\ndemo.launch()\n```\n\nBy [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3157](https://github.com/gradio-app/gradio/pull/3157)\n\n###### Bokeh plots are back! \ud83c\udf20\n\nFixed a bug that prevented bokeh plots from being displayed on the front end and extended support for both 2.x and 3.x versions of bokeh!\n\n![image](https://user-images.githubusercontent.com/41651716/219468324-0d82e07f-8fb4-4ff9-b40c-8250b29e45f7.png)\n\nBy [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3212](https://github.com/gradio-app/gradio/pull/3212)\n\n### Bug Fixes:\n\n- Adds ability to add a single message from the bot or user side. Ex: specify `None` as the second value in the tuple, to add a single message in the chatbot from the \"bot\" side.\n\n```python\ngr.Chatbot([(\"Hi, I'm DialoGPT. Try asking me a question.\", None)])\n```\n\nBy [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3165](https://github.com/gradio-app/gradio/pull/3165)\n\n- Fixes `gr.utils.delete_none` to only remove props whose values are `None` from the config by [@abidlabs](https://github.com/abidlabs) in [PR 3188](https://github.com/gradio-app/gradio/pull/3188)\n- Fix bug where embedded demos were not loading files properly by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3177](https://github.com/gradio-app/gradio/pull/3177)\n- The `change` event is now triggered when users click the 'Clear All' button of the multiselect Dropdown component by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3195](https://github.com/gradio-app/gradio/pull/3195)\n- Stops File component from freezing when a large file is uploaded by [@aliabid94](https://github.com/aliabid94) in [PR 3191](https://github.com/gradio-app/gradio/pull/3191)\n- Support Chinese pinyin in Dataframe by [@aliabid94](https://github.com/aliabid94) in [PR 3206](https://github.com/gradio-app/gradio/pull/3206)\n- The `clear` event is now triggered when images are cleared by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3218](https://github.com/gradio-app/gradio/pull/3218)\n- Fix bug where auth cookies were not sent when connecting to an app via http by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3223](https://github.com/gradio-app/gradio/pull/3223)\n- Ensure LaTeX CSS is always applied in light and dark mode by [@pngwn](https://github.com/pngwn) in [PR 3233](https://github.com/gradio-app/gradio/pull/3233)\n\n### Documentation Changes:\n\n- Sort components in docs by alphabetic order by [@aliabd](https://github.com/aliabd) in [PR 3152](https://github.com/gradio-app/gradio/pull/3152)\n- Changes to W&B guide by [@scottire](https://github.com/scottire) in [PR 3153](https://github.com/gradio-app/gradio/pull/3153)\n- Keep pnginfo metadata for gallery by [@wfng92](https://github.com/wfng92) in [PR 3150](https://github.com/gradio-app/gradio/pull/3150)\n- Add a section on how to run a Gradio app locally by [@osanseviero](https://github.com/osanseviero) in [PR 3170](https://github.com/gradio-app/gradio/pull/3170)\n- Fixed typos 
in gradio events function documentation by [@vidalmaxime](https://github.com/vidalmaxime) in [PR 3168](https://github.com/gradio-app/gradio/pull/3168)\n- Added an example using Gradio's batch mode with the diffusers library by [@abidlabs](https://github.com/abidlabs) in [PR 3224](https://github.com/gradio-app/gradio/pull/3224)\n\n#\n\n#\n\n### Full Changelog:\n\n- Fix demos page css and add close demos button by [@aliabd](https://github.com/aliabd) in [PR 3151](https://github.com/gradio-app/gradio/pull/3151)\n- Caches temp files from base64 input data by giving them a deterministic path based on the contents of data by [@abidlabs](https://github.com/abidlabs) in [PR 3197](https://github.com/gradio-app/gradio/pull/3197)\n- Better warnings (when there is a mismatch between the number of output components and values returned by a function, or when the `File` component or `UploadButton` component includes a `file_types` parameter along with `file_count==\"dir\"`) by [@abidlabs](https://github.com/abidlabs) in [PR 3194](https://github.com/gradio-app/gradio/pull/3194)\n- Raises a `gr.Error` instead of a regular Python error when you use `gr.Interface.load()` to load a model and there's an error querying the HF API by [@abidlabs](https://github.com/abidlabs) in [PR 3194](https://github.com/gradio-app/gradio/pull/3194)\n- Fixed gradio share links so that they are persistent and do not reset if network\n connection is disrupted by by [XciD](https://github.com/XciD), [Wauplin](https://github.com/Wauplin), and [@abidlabs](https://github.com/abidlabs) in [PR 3149](https://github.com/gradio-app/gradio/pull/3149) and a follow-up to allow it to work for users upgrading from a previous Gradio version in [PR 3221](https://github.com/gradio-app/gradio/pull/3221)\n\n#\n\n## 3.18.0\n\n### New Features:\n\n###### Revamped Stop Button for Interfaces \ud83d\uded1\n\nIf your Interface function is a generator, there used to be a separate `Stop` button displayed next\nto the `Submit` button.\n\nWe've revamed the `Submit` button so that it turns into a `Stop` button during the generation process.\nClicking on the `Stop` button will cancel the generation and turn it back to a `Submit` button.\nThe `Stop` button will automatically turn back to a `Submit` button at the end of the generation if you don't use it!\n\nBy [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3124](https://github.com/gradio-app/gradio/pull/3124)\n\n###### Queue now works with reload mode!\n\nYou can now call `queue` on your `demo` outside of the `if __name__ == \"__main__\"` block and\nrun the script in reload mode with the `gradio` command.\n\nAny changes to the `app.py` file will be reflected in the webpage automatically and the queue will work\nproperly!\n\nBy [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3089](https://github.com/gradio-app/gradio/pull/3089)\n\n###### Allow serving files from additional directories\n\n```python\ndemo = gr.Interface(...)\ndemo.launch(\n file_directories=[\"/var/lib/demo/path/to/resources\"]\n)\n```\n\nBy [@maxaudron](https://github.com/maxaudron) in [PR 3075](https://github.com/gradio-app/gradio/pull/3075)\n\n### Bug Fixes:\n\n- Fixes URL resolution on Windows by [@abidlabs](https://github.com/abidlabs) in [PR 3108](https://github.com/gradio-app/gradio/pull/3108)\n- Example caching now works with components without a label attribute (e.g. 
`Column`) by [@abidlabs](https://github.com/abidlabs) in [PR 3123](https://github.com/gradio-app/gradio/pull/3123)\n- Ensure the Video component correctly resets the UI state when a new video source is loaded and reduce choppiness of UI by [@pngwn](https://github.com/pngwn) in [PR 3117](https://github.com/gradio-app/gradio/pull/3117)\n- Fixes loading private Spaces by [@abidlabs](https://github.com/abidlabs) in [PR 3068](https://github.com/gradio-app/gradio/pull/3068)\n- Added a warning when attempting to launch an `Interface` via the `%%blocks` jupyter notebook magic command by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3126](https://github.com/gradio-app/gradio/pull/3126)\n- Fixes bug where interactive output image cannot be set when in edit mode by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3135](https://github.com/gradio-app/gradio/pull/3135)\n- A share link will automatically be created when running on SageMaker notebooks so that the front-end is properly displayed by [@abidlabs](https://github.com/abidlabs) in [PR 3137](https://github.com/gradio-app/gradio/pull/3137)\n- Fixes a few dropdown component issues; hide checkmark next to options as expected, and keyboard hover is visible by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3145](https://github.com/gradio-app/gradio/pull/3145)\n- Fixed bug where example pagination buttons were not visible in dark mode or displayed under the examples table. By [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3144](https://github.com/gradio-app/gradio/pull/3144)\n- Fixed bug where the font color of axis labels and titles for native plots did not respond to dark mode preferences. By [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3146](https://github.com/gradio-app/gradio/pull/3146)\n\n### Documentation Changes:\n\n- Added a guide on the 4 kinds of Gradio Interfaces by [@yvrjsharma](https://github.com/yvrjsharma) and [@abidlabs](https://github.com/abidlabs) in [PR 3003](https://github.com/gradio-app/gradio/pull/3003)\n- Explained that the parameters in `launch` will not be respected when using reload mode, e.g. the `gradio` command, by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3089](https://github.com/gradio-app/gradio/pull/3089)\n- Added a demo to show how to set up variable numbers of outputs in Gradio by [@abidlabs](https://github.com/abidlabs) in [PR 3127](https://github.com/gradio-app/gradio/pull/3127)\n- Updated docs to reflect that the `equal_height` parameter should be passed to the `.style()` method of `gr.Row()` by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3125](https://github.com/gradio-app/gradio/pull/3125)\n\n#\n\n#\n\n### Full Changelog:\n\n- Changed URL of final image for `fake_diffusion` demos by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3120](https://github.com/gradio-app/gradio/pull/3120)\n\n#\n\n## 3.17.1\n\n### New Features:\n\n###### iOS image rotation fixed \ud83d\udd04\n\nPreviously photos uploaded via iOS would be rotated after processing. 
This has been fixed by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3091](https://github.com/gradio-app/gradio/pull/3091)\n\n######### Before\n\n![image](https://user-images.githubusercontent.com/41651716/215846507-a36e9d05-1ac2-4867-8ab3-ce045a9415d9.png)\n\n######### After\n\n![image](https://user-images.githubusercontent.com/41651716/215846554-e41773ed-70f0-491a-9952-6a18babf91ef.png)\n\n###### Run on Kaggle kernels \ud83e\uddea\n\nA share link will automatically be created when running on Kaggle kernels (notebooks) so that the front-end is properly displayed.\n\n![image](https://user-images.githubusercontent.com/41651716/216104254-2cf55599-449c-436c-b57e-40f6a83f9eee.png)\n\nBy [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3101](https://github.com/gradio-app/gradio/pull/3101)\n\n### Bug Fixes:\n\n- Fix bug where examples were not rendered correctly for demos created with the Blocks API that had multiple input components by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3090](https://github.com/gradio-app/gradio/pull/3090)\n- Fix change event listener for JSON, HighlightedText, Chatbot by [@aliabid94](https://github.com/aliabid94) in [PR 3095](https://github.com/gradio-app/gradio/pull/3095)\n- Fixes bug where video and file change events were not working by [@tomchang25](https://github.com/tomchang25) in [PR 3098](https://github.com/gradio-app/gradio/pull/3098)\n- Fixes bug where static_video play and pause events were not working by [@tomchang25](https://github.com/tomchang25) in [PR 3098](https://github.com/gradio-app/gradio/pull/3098)\n- Fixed `Gallery.style(grid=...)` by [@aliabd](https://github.com/aliabd) in [PR 3107](https://github.com/gradio-app/gradio/pull/3107)\n\n### Documentation Changes:\n\n- Update chatbot guide to include blocks demo and markdown support section by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3023](https://github.com/gradio-app/gradio/pull/3023)\n\n* Fix a broken link in the Quick Start guide, by [@cakiki](https://github.com/cakiki) in [PR 3109](https://github.com/gradio-app/gradio/pull/3109)\n* Better docs navigation on mobile by [@aliabd](https://github.com/aliabd) in [PR 3112](https://github.com/gradio-app/gradio/pull/3112)\n* Add a guide on using Gradio with [Comet](https://comet.com/), by [@DN6](https://github.com/DN6/) in [PR 3058](https://github.com/gradio-app/gradio/pull/3058)\n\n#\n\n#\n\n### Full Changelog:\n\n- Set minimum `markdown-it-py` version to `2.0.0` so that the dollar math plugin is compatible by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3102](https://github.com/gradio-app/gradio/pull/3102)\n\n#\n\n## 3.17.0\n\n### New Features:\n\n###### Extended support for Interface.load! \ud83c\udfd7\ufe0f\n\nYou can now load `image-to-text` and `conversational` pipelines from the hub!\n\n###### Image-to-text Demo\n\n```python\nio = gr.Interface.load(\"models/nlpconnect/vit-gpt2-image-captioning\",\n api_key=\"\")\nio.launch()\n```\n\n\"image\"\n\n###### conversational Demo\n\n```python\nchatbot = gr.Interface.load(\"models/microsoft/DialoGPT-medium\",\n api_key=\"\")\nchatbot.launch()\n```\n\n![chatbot_load](https://user-images.githubusercontent.com/41651716/213260220-3eaa25b7-a38b-48c6-adeb-2718bdf297a2.gif)\n\nBy [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3011](https://github.com/gradio-app/gradio/pull/3011)\n\n###### Download Button added to Model3D Output Component \ud83d\udce5\n\nNo need for an additional file output component to enable model3d file downloads anymore. 
We now added a download button to the model3d component itself.\n\n\"Screenshot\n\nBy [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3014](https://github.com/gradio-app/gradio/pull/3014)\n\n###### Fixing Auth on Spaces \ud83d\udd11\n\nAuthentication on spaces works now! Third party cookies must be enabled on your browser to be able\nto log in. Some browsers disable third party cookies by default (Safari, Chrome Incognito).\n\n![auth_spaces](https://user-images.githubusercontent.com/41651716/215528417-09538933-0576-4d1d-b3b9-1e877ab01905.gif)\n\n### Bug Fixes:\n\n- Fixes bug where interpretation event was not configured correctly by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2993](https://github.com/gradio-app/gradio/pull/2993)\n- Fix relative import bug in reload mode by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2992](https://github.com/gradio-app/gradio/pull/2992)\n- Fixes bug where png files were not being recognized when uploading images by [@abidlabs](https://github.com/abidlabs) in [PR 3002](https://github.com/gradio-app/gradio/pull/3002)\n- Fixes bug where external Spaces could not be loaded and used as functions if they returned files by [@abidlabs](https://github.com/abidlabs) in [PR 3004](https://github.com/gradio-app/gradio/pull/3004)\n- Fix bug where file serialization output was not JSON serializable by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2999](https://github.com/gradio-app/gradio/pull/2999)\n- Fixes bug where temporary uploaded files were not being added to temp sets by [@abidlabs](https://github.com/abidlabs) in [PR 3005](https://github.com/gradio-app/gradio/pull/3005)\n- Fixes issue where markdown support in chatbot breaks older demos by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3006](https://github.com/gradio-app/gradio/pull/3006)\n- Fixes the `/file/` route that was broken in a recent change in [PR 3010](https://github.com/gradio-app/gradio/pull/3010)\n- Fix bug where the Image component could not serialize image urls by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2957](https://github.com/gradio-app/gradio/pull/2957)\n- Fix forwarding for guides after SEO renaming by [@aliabd](https://github.com/aliabd) in [PR 3017](https://github.com/gradio-app/gradio/pull/3017)\n- Switch all pages on the website to use latest stable gradio by [@aliabd](https://github.com/aliabd) in [PR 3016](https://github.com/gradio-app/gradio/pull/3016)\n- Fix bug related to deprecated parameters in `huggingface_hub` for the HuggingFaceDatasetSaver in [PR 3025](https://github.com/gradio-app/gradio/pull/3025)\n- Added better support for symlinks in the way absolute paths are resolved by [@abidlabs](https://github.com/abidlabs) in [PR 3037](https://github.com/gradio-app/gradio/pull/3037)\n- Fix several minor frontend bugs (loading animation, examples as gallery) by [@aliabid94](https://github.com/aliabid94) in [PR 3026](https://github.com/gradio-app/gradio/pull/3026).\n- Fixes bug where the chatbot sample code did not work with certain input values by [@petrov826](https://github.com/petrov826) in [PR 3039](https://github.com/gradio-app/gradio/pull/3039).\n- Fix shadows for form elements and ensure focus styles are more visible in dark mode by [@pngwn](https://github.com/pngwn) in [PR 3042](https://github.com/gradio-app/gradio/pull/3042).\n- 
Fixed bug where the Checkbox and Dropdown change events were not triggered in response to other component changes by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3045](https://github.com/gradio-app/gradio/pull/3045)\n- Fix bug where the queue was not properly restarted after launching a `closed` app by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3022](https://github.com/gradio-app/gradio/pull/3022)\n- Adding missing embedded components on docs by [@aliabd](https://github.com/aliabd) in [PR 3027](https://github.com/gradio-app/gradio/pull/3027)\n- Fixes bug where app would crash if the `file_types` parameter of `gr.File` or `gr.UploadButton` was not a list by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3048](https://github.com/gradio-app/gradio/pull/3048)\n- Ensure CSS mounts correctly regardless of how many Gradio instances are on the page [@pngwn](https://github.com/pngwn) in [PR 3059](https://github.com/gradio-app/gradio/pull/3059).\n- Fix bug where input component was not hidden in the frontend for `UploadButton` by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3053](https://github.com/gradio-app/gradio/pull/3053)\n- Fixes issue where after clicking submit or undo, the sketch output wouldn't clear. [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3047](https://github.com/gradio-app/gradio/pull/3047)\n- Ensure spaces embedded via the web component always use the correct URLs for server requests and change ports for testing to avoid strange collisions when users are working with embedded apps locally by [@pngwn](https://github.com/pngwn) in [PR 3065](https://github.com/gradio-app/gradio/pull/3065)\n- Preserve selected image of Gallery through updated by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3061](https://github.com/gradio-app/gradio/pull/3061)\n- Fix bug where auth was not respected on HF spaces by [@freddyaboulton](https://github.com/freddyaboulton) and [@aliabid94](https://github.com/aliabid94) in [PR 3049](https://github.com/gradio-app/gradio/pull/3049)\n- Fixes bug where tabs selected attribute not working if manually change tab by [@tomchang25](https://github.com/tomchang25) in [3055](https://github.com/gradio-app/gradio/pull/3055)\n- Change chatbot to show dots on progress, and fix bug where chatbot would not stick to bottom in the case of images by [@aliabid94](https://github.com/aliabid94) in [PR 3067](https://github.com/gradio-app/gradio/pull/3079)\n\n### Documentation Changes:\n\n- SEO improvements to guides by[@aliabd](https://github.com/aliabd) in [PR 2915](https://github.com/gradio-app/gradio/pull/2915)\n- Use `gr.LinePlot` for the `blocks_kinematics` demo by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2998](https://github.com/gradio-app/gradio/pull/2998)\n- Updated the `interface_series_load` to include some inline markdown code by [@abidlabs](https://github.com/abidlabs) in [PR 3051](https://github.com/gradio-app/gradio/pull/3051)\n\n### Testing and Infrastructure Changes:\n\n- Adds a GitHub action to test if any large files (> 5MB) are present by [@abidlabs](https://github.com/abidlabs) in [PR 3013](https://github.com/gradio-app/gradio/pull/3013)\n\n#\n\n### Full Changelog:\n\n- Rewrote frontend using CSS variables for themes by [@pngwn](https://github.com/pngwn) in [PR 2840](https://github.com/gradio-app/gradio/pull/2840)\n- Moved telemetry requests to run on background threads by [@abidlabs](https://github.com/abidlabs) in [PR 
3054](https://github.com/gradio-app/gradio/pull/3054)\n\n#\n\n## 3.16.2\n\n#\n\n### Bug Fixes:\n\n- Fixed file upload failures for files with zero size by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2923](https://github.com/gradio-app/gradio/pull/2923)\n- Fixed bug where `mount_gradio_app` would not launch if the queue was enabled in a gradio app by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2939](https://github.com/gradio-app/gradio/pull/2939)\n- Fix custom long CSS handling in Blocks by [@anton-l](https://github.com/anton-l) in [PR 2953](https://github.com/gradio-app/gradio/pull/2953)\n- Recovers the dropdown change event by [@abidlabs](https://github.com/abidlabs) in [PR 2954](https://github.com/gradio-app/gradio/pull/2954).\n- Fix audio file output by [@aliabid94](https://github.com/aliabid94) in [PR 2961](https://github.com/gradio-app/gradio/pull/2961).\n- Fixed bug where file extensions of really long files were not kept after download by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2929](https://github.com/gradio-app/gradio/pull/2929)\n- Fix bug where outputs for examples were not being returned by the backend by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2955](https://github.com/gradio-app/gradio/pull/2955)\n- Fix bug in `blocks_plug` demo that prevented switching tabs programmatically with Python by [@TashaSkyUp](https://github.com/TashaSkyUp) in [PR 2971](https://github.com/gradio-app/gradio/pull/2971).\n\n#\n\n#\n\n#\n\n#\n\n#\n\n## 3.16.1\n\n#\n\n### Bug Fixes:\n\n- Fix audio file output by [@aliabid94](https://github.com/aliabid94) in [PR 2950](https://github.com/gradio-app/gradio/pull/2950).\n\n#\n\n#\n\n#\n\n#\n\n#\n\n## 3.16.0\n\n### New Features:\n\n###### Send custom progress updates by adding a `gr.Progress` argument after the input arguments to any function. 
Example:\n\n```python\ndef reverse(word, progress=gr.Progress()):\n progress(0, desc=\"Starting\")\n time.sleep(1)\n new_string = \"\"\n for letter in progress.tqdm(word, desc=\"Reversing\"):\n time.sleep(0.25)\n new_string = letter + new_string\n return new_string\n\ndemo = gr.Interface(reverse, gr.Text(), gr.Text())\n```\n\nProgress indicator bar by [@aliabid94](https://github.com/aliabid94) in [PR 2750](https://github.com/gradio-app/gradio/pull/2750).\n\n- Added `title` argument to `TabbedInterface` by @MohamedAliRashad in [#2888](https://github.com/gradio-app/gradio/pull/2888)\n- Add support for specifying file extensions for `gr.File` and `gr.UploadButton`, using `file_types` parameter (e.g `gr.File(file_count=\"multiple\", file_types=[\"text\", \".json\", \".csv\"])`) by @dawoodkhan82 in [#2901](https://github.com/gradio-app/gradio/pull/2901)\n- Added `multiselect` option to `Dropdown` by @dawoodkhan82 in [#2871](https://github.com/gradio-app/gradio/pull/2871)\n\n###### With `multiselect` set to `true` a user can now select multiple options from the `gr.Dropdown` component.\n\n```python\ngr.Dropdown([\"angola\", \"pakistan\", \"canada\"], multiselect=True, value=[\"angola\"])\n```\n\n\"Screenshot\n\n### Bug Fixes:\n\n- Fixed bug where an error opening an audio file led to a crash by [@FelixDombek](https://github.com/FelixDombek) in [PR 2898](https://github.com/gradio-app/gradio/pull/2898)\n- Fixed bug where setting `default_enabled=False` made it so that the entire queue did not start by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2876](https://github.com/gradio-app/gradio/pull/2876)\n- Fixed bug where csv preview for DataFrame examples would show filename instead of file contents by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2877](https://github.com/gradio-app/gradio/pull/2877)\n- Fixed bug where an error raised after yielding iterative output would not be displayed in the browser by\n [@JaySmithWpg](https://github.com/JaySmithWpg) in [PR 2889](https://github.com/gradio-app/gradio/pull/2889)\n- Fixed bug in `blocks_style` demo that was preventing it from launching by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2890](https://github.com/gradio-app/gradio/pull/2890)\n- Fixed bug where files could not be downloaded by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2926](https://github.com/gradio-app/gradio/pull/2926)\n- Fixed bug where cached examples were not displaying properly by [@a-rogalska](https://github.com/a-rogalska) in [PR 2974](https://github.com/gradio-app/gradio/pull/2974)\n\n### Documentation Changes:\n\n- Added a Guide on using Google Sheets to create a real-time dashboard with Gradio's `DataFrame` and `LinePlot` component, by [@abidlabs](https://github.com/abidlabs) in [PR 2816](https://github.com/gradio-app/gradio/pull/2816)\n- Add a components - events matrix on the docs by [@aliabd](https://github.com/aliabd) in [PR 2921](https://github.com/gradio-app/gradio/pull/2921)\n\n### Testing and Infrastructure Changes:\n\n- Deployed PRs from forks to spaces by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2895](https://github.com/gradio-app/gradio/pull/2895)\n\n#\n\n### Full Changelog:\n\n- The `default_enabled` parameter of the `Blocks.queue` method has no effect by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2876](https://github.com/gradio-app/gradio/pull/2876)\n- Added typing to several Python files in codebase by [@abidlabs](https://github.com/abidlabs) in [PR 
2887](https://github.com/gradio-app/gradio/pull/2887)\n- Excluding untracked files from demo notebook check action by [@aliabd](https://github.com/aliabd) in [PR 2897](https://github.com/gradio-app/gradio/pull/2897)\n- Optimize images and gifs by [@aliabd](https://github.com/aliabd) in [PR 2922](https://github.com/gradio-app/gradio/pull/2922)\n- Updated typing by [@1nF0rmed](https://github.com/1nF0rmed) in [PR 2904](https://github.com/gradio-app/gradio/pull/2904)\n\n### Contributors Shoutout:\n\n- @JaySmithWpg for making their first contribution to gradio!\n- @MohamedAliRashad for making their first contribution to gradio!\n\n## 3.15.0\n\n### New Features:\n\nGradio's newest plotting component `gr.LinePlot`! \ud83d\udcc8\n\nWith this component you can easily create time series visualizations with customizable\nappearance for your demos and dashboards ... all without having to know an external plotting library.\n\nFor an example of the API, see below:\n\n```python\ngr.LinePlot(stocks,\n x=\"date\",\n y=\"price\",\n color=\"symbol\",\n color_legend_position=\"bottom\",\n width=600, height=400, title=\"Stock Prices\")\n```\n\n![image](https://user-images.githubusercontent.com/41651716/208711646-81ae3745-149b-46a3-babd-0569aecdd409.png)\n\nBy [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2807](https://github.com/gradio-app/gradio/pull/2807)\n\n### Bug Fixes:\n\n- Fixed bug where the `examples_per_page` parameter of the `Examples` component was not passed to the internal `Dataset` component by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2861](https://github.com/gradio-app/gradio/pull/2861)\n- Fixes loading Spaces that have components with default values by [@abidlabs](https://github.com/abidlabs) in [PR 2855](https://github.com/gradio-app/gradio/pull/2855)\n- Fixes flagging when `allow_flagging=\"auto\"` in `gr.Interface()` by [@abidlabs](https://github.com/abidlabs) in [PR 2695](https://github.com/gradio-app/gradio/pull/2695)\n- Fixed bug where passing a non-list value to `gr.CheckboxGroup` would crash the entire app by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2866](https://github.com/gradio-app/gradio/pull/2866)\n\n### Documentation Changes:\n\n- Added a Guide on using BigQuery with Gradio's `DataFrame` and `ScatterPlot` component,\n by [@abidlabs](https://github.com/abidlabs) in [PR 2794](https://github.com/gradio-app/gradio/pull/2794)\n\n#\n\n#\n\n### Full Changelog:\n\n- Fixed bug where importing gradio could cause `PIL.Image.registered_extensions()` to break by [@aliencaocao](https://github.com/aliencaocao) in [PR 2846](https://github.com/gradio-app/gradio/pull/2846)\n- Fix css glitch and navigation in docs by [@aliabd](https://github.com/aliabd) in [PR 2856](https://github.com/gradio-app/gradio/pull/2856)\n- Added the ability to set `x_lim`, `y_lim` and legend positions for `gr.ScatterPlot` by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2807](https://github.com/gradio-app/gradio/pull/2807)\n- Remove footers and min-height the correct way by [@aliabd](https://github.com/aliabd) in [PR 2860](https://github.com/gradio-app/gradio/pull/2860)\n\n#\n\n## 3.14.0\n\n### New Features:\n\n###### Add Waveform Visual Support to Audio\n\nAdds a `gr.make_waveform()` function that creates a waveform video by combining an audio file and an optional background image by [@dawoodkhan82](http://github.com/dawoodkhan82) and [@aliabid94](http://github.com/aliabid94) in [PR 2706](https://github.com/gradio-app/gradio/pull/2706). 
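\n\nAs a minimal sketch of how it can be called (the `audio.wav` path below is just a placeholder, and a background image can typically be supplied via an optional keyword argument):\n\n```python\nimport gradio as gr\n\n# Combines the audio with an auto-generated waveform animation and returns the filepath of the resulting video\nvideo = gr.make_waveform(\"audio.wav\")\n```\n\n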
Helpful for making audio outputs much more shareable.\n\n![waveform screenrecording](https://user-images.githubusercontent.com/7870876/206062396-164a5e71-451a-4fe0-94a7-cbe9269d57e6.gif)\n\n###### Allows Every Component to Accept an `every` Parameter\n\nWhen a component's initial value is a function, the `every` parameter re-runs the function every `every` seconds. By [@abidlabs](https://github.com/abidlabs) in [PR 2806](https://github.com/gradio-app/gradio/pull/2806). Here's a code example:\n\n```py\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n # run_query is a user-defined function that fetches and returns the latest data\n df = gr.DataFrame(run_query, every=60*60)\n\ndemo.queue().launch()\n```\n\n### Bug Fixes:\n\n- Fixed issue where too many temporary files were created, all with randomly generated\n filepaths. Now fewer temporary files are created and are assigned a path that is a\n hash based on the file contents by [@abidlabs](https://github.com/abidlabs) in [PR 2758](https://github.com/gradio-app/gradio/pull/2758)\n\n#\n\n#\n\n#\n\n#\n\n#\n\n## 3.13.2\n\n#\n\n### Bug Fixes:\n\nNo changes to highlight.\n\n### Documentation Changes:\n\n- Improves documentation of several queuing-related parameters by [@abidlabs](https://github.com/abidlabs) in [PR 2825](https://github.com/gradio-app/gradio/pull/2825)\n\n### Testing and Infrastructure Changes:\n\n- Remove h11 pinning by [@ecederstrand](https://github.com/ecederstrand) in [PR 2820](https://github.com/gradio-app/gradio/pull/2820)\n\n#\n\n#\n\n#\n\n## 3.13.1\n\n### New Features:\n\n###### New Shareable Links\n\nReplaces tunneling logic based on ssh port-forwarding with that based on `frp` by [XciD](https://github.com/XciD) and [Wauplin](https://github.com/Wauplin) in [PR 2509](https://github.com/gradio-app/gradio/pull/2509)\n\nYou don't need to do anything differently, but when you set `share=True` in `launch()`,\nyou'll get this message and a public link that looks a little bit different:\n\n```bash\nSetting up a public link... we have recently upgraded the way public links are generated. 
If you encounter any problems, please downgrade to gradio version 3.13.0.\nRunning on public URL: https://bec81a83-5b5c-471e.gradio.live\n```\n\nThese links are a more secure and scalable way to create shareable demos!\n\n### Bug Fixes:\n\n- Allows `gr.Dataframe()` to take a `pandas.DataFrame` that includes numpy arrays and other types as its initial value, by [@abidlabs](https://github.com/abidlabs) in [PR 2804](https://github.com/gradio-app/gradio/pull/2804)\n- Add `altair` to requirements.txt by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2811](https://github.com/gradio-app/gradio/pull/2811)\n- Added aria-labels to icon buttons that are built into UI components by [@emilyuhde](http://github.com/emilyuhde) in [PR 2791](https://github.com/gradio-app/gradio/pull/2791)\n\n### Documentation Changes:\n\n- Fixed some typos in the \"Plot Component for Maps\" guide by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2811](https://github.com/gradio-app/gradio/pull/2811)\n\n### Testing and Infrastructure Changes:\n\n- Fixed test for IP address by [@abidlabs](https://github.com/abidlabs) in [PR 2808](https://github.com/gradio-app/gradio/pull/2808)\n\n#\n\n### Full Changelog:\n\n- Fixed typo in parameter `visible` in classes in `templates.py` by [@abidlabs](https://github.com/abidlabs) in [PR 2805](https://github.com/gradio-app/gradio/pull/2805)\n- Switched external service for getting IP address from `https://api.ipify.org` to `https://checkip.amazonaws.com/` by [@abidlabs](https://github.com/abidlabs) in [PR 2810](https://github.com/gradio-app/gradio/pull/2810)\n\n#\n\n## 3.13.0\n\n### New Features:\n\n###### Scatter plot component\n\nIt is now possible to create a scatter plot natively in Gradio!\n\nThe `gr.ScatterPlot` component accepts a pandas dataframe and some optional configuration parameters\nand will automatically create a plot for you!\n\nThis is the first of many native plotting components in Gradio!\n\nFor an example of how to use `gr.ScatterPlot` see below:\n\n```python\nimport gradio as gr\nfrom vega_datasets import data\n\ncars = data.cars()\n\nwith gr.Blocks() as demo:\n gr.ScatterPlot(show_label=False,\n value=cars,\n x=\"Horsepower\",\n y=\"Miles_per_Gallon\",\n color=\"Origin\",\n tooltip=\"Name\",\n title=\"Car Data\",\n y_title=\"Miles per Gallon\",\n color_legend_title=\"Origin of Car\").style(container=False)\n\ndemo.launch()\n```\n\n\"image\"\n\nBy [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2764](https://github.com/gradio-app/gradio/pull/2764)\n\n###### Support for altair plots\n\nThe `Plot` component can now accept altair plots as values!\nSimply return an altair plot from your event listener and gradio will display it in the front-end.\nSee the example below:\n\n```python\nimport gradio as gr\nimport altair as alt\nfrom vega_datasets import data\n\ncars = data.cars()\nchart = (\n alt.Chart(cars)\n .mark_point()\n .encode(\n x=\"Horsepower\",\n y=\"Miles_per_Gallon\",\n color=\"Origin\",\n )\n)\n\nwith gr.Blocks() as demo:\n gr.Plot(value=chart)\ndemo.launch()\n```\n\n\"image\"\n\nBy [@freddyaboulton](https://github.com/freddyaboulton) in 
[PR 2741](https://github.com/gradio-app/gradio/pull/2741)\n\n###### Set the background color of a Label component\n\nThe `Label` component now accepts a `color` argument by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2736](https://github.com/gradio-app/gradio/pull/2736).\nThe `color` argument should be either a valid CSS color name or a hexadecimal string.\nYou can update the color with `gr.Label.update`!\n\nThis lets you create Alert and Warning boxes with the `Label` component. See below:\n\n```python\nimport gradio as gr\nimport random\n\ndef update_color(value):\n if value < 0:\n # This is bad so use red\n return \"#FF0000\"\n elif 0 <= value <= 20:\n # Ok but pay attention (use orange)\n return \"#ff9966\"\n else:\n # Nothing to worry about\n return None\n\ndef update_value():\n # Pick a random number so that update_color receives a numeric value\n value = random.randint(-20, 40)\n color = update_color(value)\n return gr.Label.update(value=str(value), color=color)\n\n\nwith gr.Blocks() as demo:\n label = gr.Label(value=-10)\n demo.load(lambda: update_value(), inputs=None, outputs=[label], every=1)\ndemo.queue().launch()\n```\n\n![label_bg_color_update](https://user-images.githubusercontent.com/41651716/204400372-80e53857-f26f-4a38-a1ae-1acadff75e89.gif)\n\n###### Add Brazilian Portuguese translation\n\nAdd Brazilian Portuguese translation (pt-BR.json) by [@pstwh](http://github.com/pstwh) in [PR 2753](https://github.com/gradio-app/gradio/pull/2753):\n\n\"image\"\n\n### Bug Fixes:\n\n- Fixed issue where image thumbnails were not showing when an example directory was provided\n by [@abidlabs](https://github.com/abidlabs) in [PR 2745](https://github.com/gradio-app/gradio/pull/2745)\n- Fixed bug loading audio input models from the hub by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2779](https://github.com/gradio-app/gradio/pull/2779).\n- Fixed issue where entities were not merged when highlighted text was generated from the\n dictionary inputs by [@payoto](https://github.com/payoto) in [PR 2767](https://github.com/gradio-app/gradio/pull/2767)\n- Fixed bug where generating events did not finish running even if the websocket connection was closed by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2783](https://github.com/gradio-app/gradio/pull/2783).\n\n#\n\n#\n\n#\n\n### Full Changelog:\n\n- Images in the chatbot component are now resized if they exceed a max width by [@abidlabs](https://github.com/abidlabs) in [PR 2748](https://github.com/gradio-app/gradio/pull/2748)\n- Missing parameters have been added to `gr.Blocks().load()` by [@abidlabs](https://github.com/abidlabs) in [PR 2755](https://github.com/gradio-app/gradio/pull/2755)\n- Deindex share URLs from search by [@aliabd](https://github.com/aliabd) in [PR 2772](https://github.com/gradio-app/gradio/pull/2772)\n- Redirect old links and fix broken ones by [@aliabd](https://github.com/aliabd) in [PR 2774](https://github.com/gradio-app/gradio/pull/2774)\n\n#\n\n## 3.12.0\n\n### New Features:\n\n###### The `Chatbot` component now supports a subset of Markdown (including bold, italics, code, images)\n\nYou can now pass in some Markdown to the Chatbot component and it will show up,\nmeaning that you can pass in images as well! 
by [@abidlabs](https://github.com/abidlabs) in [PR 2731](https://github.com/gradio-app/gradio/pull/2731)\n\nHere's a simple example that references a local image `lion.jpg` that is in the same\nfolder as the Python script:\n\n```py\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n gr.Chatbot([(\"hi\", \"hello **abubakar**\"), (\"![](/file=lion.jpg)\", \"cool pic\")])\n\ndemo.launch()\n```\n\n![Alt text](https://user-images.githubusercontent.com/1778297/204357455-5c1a4002-eee7-479d-9a1e-ba2c12522723.png)\n\nTo see a more realistic example, see the new demo `/demo/chatbot_multimodal/run.py`.\n\n###### LaTeX support\n\nAdded mathtext (a subset of LaTeX) support to `gr.Markdown`. Added by [@kashif](https://github.com/kashif) and [@aliabid94](https://github.com/aliabid94) in [PR 2696](https://github.com/gradio-app/gradio/pull/2696).\n\nExample of how it can be used:\n\n```python\ngr.Markdown(\n r\"\"\"\n # Hello World! $\\frac{\\sqrt{x + y}}{4}$ is today's lesson.\n \"\"\")\n```\n\n###### Update Accordion properties from the backend\n\nYou can now update the Accordion `label` and `open` status with `gr.Accordion.update` by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2690](https://github.com/gradio-app/gradio/pull/2690)\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n with gr.Accordion(label=\"Open for greeting\", open=False) as accordion:\n gr.Textbox(\"Hello!\")\n open_btn = gr.Button(value=\"Open Accordion\")\n close_btn = gr.Button(value=\"Close Accordion\")\n open_btn.click(\n lambda: gr.Accordion.update(open=True, label=\"Open Accordion\"),\n inputs=None,\n outputs=[accordion],\n )\n close_btn.click(\n lambda: gr.Accordion.update(open=False, label=\"Closed Accordion\"),\n inputs=None,\n outputs=[accordion],\n )\ndemo.launch()\n```\n\n![update_accordion](https://user-images.githubusercontent.com/41651716/203164176-b102eae3-babe-4986-ae30-3ab4f400cedc.gif)\n\n### Bug Fixes:\n\n- Fixed bug where a requests timeout was missing from `utils.version_check()` by [@yujiehecs](https://github.com/yujiehecs) in [PR 2729](https://github.com/gradio-app/gradio/pull/2729)\n- Fixed bug so that the `File` component can properly preprocess files to \"binary\" byte-string format by [@CoffeeVampir3](https://github.com/CoffeeVampir3) in [PR 2727](https://github.com/gradio-app/gradio/pull/2727)\n- Fixed bug to ensure that filenames are less than 200 characters even for non-English languages by [@SkyTNT](https://github.com/SkyTNT) in [PR 2685](https://github.com/gradio-app/gradio/pull/2685)\n\n### Documentation Changes:\n\n- Performance improvements to docs on mobile by [@aliabd](https://github.com/aliabd) in [PR 2730](https://github.com/gradio-app/gradio/pull/2730)\n\n#\n\n#\n\n### Full Changelog:\n\n- Make try examples button more prominent by [@aliabd](https://github.com/aliabd) in [PR 2705](https://github.com/gradio-app/gradio/pull/2705)\n- Fix id clashes in docs by [@aliabd](https://github.com/aliabd) in [PR 2713](https://github.com/gradio-app/gradio/pull/2713)\n- Fix typos in guide docs by [@andridns](https://github.com/andridns) in [PR 2722](https://github.com/gradio-app/gradio/pull/2722)\n- Add option to `include_audio` in Video component. 
When `True`, for `source=\"webcam\"` this will record audio and video, for `source=\"upload\"` this will retain the audio in an uploaded video by [@mandargogate](https://github.com/MandarGogate) in [PR 2721](https://github.com/gradio-app/gradio/pull/2721)\n\n### Contributors Shoutout:\n\n- [@andridns](https://github.com/andridns) made their first contribution in [PR 2722](https://github.com/gradio-app/gradio/pull/2722)!\n\n## 3.11.0\n\n### New Features:\n\n###### Upload Button\n\nThere is now a new component called the `UploadButton` which is a file upload component but in button form! You can also specify what file types it should accept in the form of a list (ex: `image`, `video`, `audio`, `text`, or generic `file`). Added by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2591](https://github.com/gradio-app/gradio/pull/2591).\n\nExample of how it can be used:\n\n```python\nimport gradio as gr\n\ndef upload_file(files):\n file_paths = [file.name for file in files]\n return file_paths\n\nwith gr.Blocks() as demo:\n file_output = gr.File()\n upload_button = gr.UploadButton(\"Click to Upload a File\", file_types=[\"image\", \"video\"], file_count=\"multiple\")\n upload_button.upload(upload_file, upload_button, file_output)\n\ndemo.launch()\n```\n\n###### Revamped API documentation page\n\nNew API Docs page with in-browser playground and updated aesthetics. [@gary149](https://github.com/gary149) in [PR 2652](https://github.com/gradio-app/gradio/pull/2652)\n\n###### Revamped Login page\n\nPreviously our login page had its own CSS, had no dark mode, and had an ugly json message on the wrong credentials. Made the page more aesthetically consistent, added dark mode support, and a nicer error message. [@aliabid94](https://github.com/aliabid94) in [PR 2684](https://github.com/gradio-app/gradio/pull/2684)\n\n###### Accessing the Requests Object Directly\n\nYou can now access the Request object directly in your Python function by [@abidlabs](https://github.com/abidlabs) in [PR 2641](https://github.com/gradio-app/gradio/pull/2641). This means that you can access request headers, the client IP address, and so on. In order to use it, add a parameter to your function and set its type hint to be `gr.Request`. Here's a simple example:\n\n```py\nimport gradio as gr\n\ndef echo(name, request: gr.Request):\n if request:\n print(\"Request headers dictionary:\", request.headers)\n print(\"IP address:\", request.client.host)\n return name\n\nio = gr.Interface(echo, \"textbox\", \"textbox\").launch()\n```\n\n### Bug Fixes:\n\n- Fixed bug that limited files from being sent over websockets to 16MB. The new limit\n is now 1GB by [@abidlabs](https://github.com/abidlabs) in [PR 2709](https://github.com/gradio-app/gradio/pull/2709)\n\n### Documentation Changes:\n\n- Updated documentation for embedding Gradio demos on Spaces as web components by\n [@julien-c](https://github.com/julien-c) in [PR 2698](https://github.com/gradio-app/gradio/pull/2698)\n- Updated IFrames in Guides to use the host URL instead of the Space name to be consistent with the new method for embedding Spaces, by\n [@julien-c](https://github.com/julien-c) in [PR 2692](https://github.com/gradio-app/gradio/pull/2692)\n- Colab buttons on every demo in the website! 
Just click open in colab, and run the demo there.\n\nhttps://user-images.githubusercontent.com/9021060/202878400-cb16ed47-f4dd-4cb0-b2f0-102a9ff64135.mov\n\n#\n\n#\n\n### Full Changelog:\n\n- Better warnings and error messages for `gr.Interface.load()` by [@abidlabs](https://github.com/abidlabs) in [PR 2694](https://github.com/gradio-app/gradio/pull/2694)\n- Add open in colab buttons to demos in docs and /demos by [@aliabd](https://github.com/aliabd) in [PR 2608](https://github.com/gradio-app/gradio/pull/2608)\n- Apply different formatting for the types in component docstrings by [@aliabd](https://github.com/aliabd) in [PR 2707](https://github.com/gradio-app/gradio/pull/2707)\n\n#\n\n## 3.10.1\n\n#\n\n### Bug Fixes:\n\n- Passes kwargs into `gr.Interface.load()` by [@abidlabs](https://github.com/abidlabs) in [PR 2669](https://github.com/gradio-app/gradio/pull/2669)\n\n#\n\n#\n\n#\n\n### Full Changelog:\n\n- Clean up printed statements in Embedded Colab Mode by [@aliabid94](https://github.com/aliabid94) in [PR 2612](https://github.com/gradio-app/gradio/pull/2612)\n\n#\n\n## 3.10.0\n\n- Add support for `'password'` and `'email'` types to `Textbox`. [@pngwn](https://github.com/pngwn) in [PR 2653](https://github.com/gradio-app/gradio/pull/2653)\n- `gr.Textbox` component will now raise an exception if `type` is not \"text\", \"email\", or \"password\" [@pngwn](https://github.com/pngwn) in [PR 2653](https://github.com/gradio-app/gradio/pull/2653). This will cause demos using the deprecated `gr.Textbox(type=\"number\")` to raise an exception.\n\n### Bug Fixes:\n\n- Updated the minimum FastApi used in tests to version 0.87 by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2647](https://github.com/gradio-app/gradio/pull/2647)\n- Fixed bug where interfaces with examples could not be loaded with `gr.Interface.load` by [@freddyaboulton](https://github.com/freddyaboulton) [PR 2640](https://github.com/gradio-app/gradio/pull/2640)\n- Fixed bug where the `interactive` property of a component could not be updated by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2639](https://github.com/gradio-app/gradio/pull/2639)\n- Fixed bug where some URLs were not being recognized as valid URLs and thus were not\n loading correctly in various components by [@abidlabs](https://github.com/abidlabs) in [PR 2659](https://github.com/gradio-app/gradio/pull/2659)\n\n### Documentation Changes:\n\n- Fix some typos in the embedded demo names in \"05_using_blocks_like_functions.md\" by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2656](https://github.com/gradio-app/gradio/pull/2656)\n\n#\n\n#\n\n### Full Changelog:\n\n- Add support for `'password'` and `'email'` types to `Textbox`. 
[@pngwn](https://github.com/pngwn) in [PR 2653](https://github.com/gradio-app/gradio/pull/2653)\n\n#\n\n## 3.9.1\n\n#\n\n### Bug Fixes:\n\n- Only set a min height on md and html when loading by [@pngwn](https://github.com/pngwn) in [PR 2623](https://github.com/gradio-app/gradio/pull/2623)\n\n### Documentation Changes:\n\n- See docs for the latest gradio commit to main as well as the latest pip release:\n\n![main-vs-pip](https://user-images.githubusercontent.com/9021060/199607887-aab1ae4e-a070-4527-966d-024397abe15b.gif)\n\n- Modified the \"Connecting To a Database Guide\" to use `pd.read_sql` as opposed to a low-level postgres connector by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2604](https://github.com/gradio-app/gradio/pull/2604)\n\n#\n\n#\n\n### Full Changelog:\n\n- Dropdown for seeing docs as latest or main by [@aliabd](https://github.com/aliabd) in [PR 2544](https://github.com/gradio-app/gradio/pull/2544)\n- Allow `gr.Templates` to accept parameters to override the defaults by [@abidlabs](https://github.com/abidlabs) in [PR 2600](https://github.com/gradio-app/gradio/pull/2600)\n- Components now throw a `ValueError()` if constructed with invalid parameters for `type` or `source` (for components that take those parameters) in [PR 2610](https://github.com/gradio-app/gradio/pull/2610)\n- Allow auth while using the queue by [@GLGDLY](https://github.com/GLGDLY) in [PR 2611](https://github.com/gradio-app/gradio/pull/2611)\n\n#\n\n## 3.9\n\n### New Features:\n\n- Gradio is now embedded directly in colab without requiring the share link by [@aliabid94](https://github.com/aliabid94) in [PR 2455](https://github.com/gradio-app/gradio/pull/2455)\n\n###### Calling functions by api_name in loaded apps\n\nWhen you load an upstream app with `gr.Blocks.load`, you can now specify which fn\nto call with the `api_name` parameter.\n\n```python\nimport gradio as gr\nenglish_translator = gr.Blocks.load(name=\"spaces/gradio/english-translator\")\ngerman = english_translator(\"My name is Freddy\", api_name='translate-to-german')\n```\n\nThe `api_name` parameter will take precedence over the `fn_index` parameter.\n\n### Bug Fixes:\n\n- Fixed bug where None could not be used for File, Model3D, and Audio examples by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2588](https://github.com/gradio-app/gradio/pull/2588)\n- Fixed links in Plotly map guide + demo by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2578](https://github.com/gradio-app/gradio/pull/2578)\n- `gr.Blocks.load()` now correctly loads example files from Spaces by [@abidlabs](https://github.com/abidlabs) in [PR 2594](https://github.com/gradio-app/gradio/pull/2594)\n- Fixed bug where clearing an image started the upload dialog by [@mezotaken](https://github.com/mezotaken) in [PR 2577](https://github.com/gradio-app/gradio/pull/2577)\n\n### Documentation Changes:\n\n- Added a Guide on how to configure the queue for maximum performance by [@abidlabs](https://github.com/abidlabs) in [PR 2558](https://github.com/gradio-app/gradio/pull/2558)\n\n#\n\n#\n\n### Full Changelog:\n\n- Add `api_name` to `Blocks.__call__` by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2593](https://github.com/gradio-app/gradio/pull/2593)\n- Update queue with using deque & update requirements by [@GLGDLY](https://github.com/GLGDLY) in [PR 2428](https://github.com/gradio-app/gradio/pull/2428)\n\n#\n\n## 3.8.2\n\n### Bug Fixes:\n\n- Ensure gradio apps embedded via spaces use the correct endpoint for predictions. 
[@pngwn](https://github.com/pngwn) in [PR 2567](https://github.com/gradio-app/gradio/pull/2567)\n- Ensure gradio apps embedded via spaces use the correct websocket protocol. [@pngwn](https://github.com/pngwn) in [PR 2571](https://github.com/gradio-app/gradio/pull/2571)\n\n### New Features:\n\n###### Running Events Continuously\n\nGradio now supports the ability to run an event continuously on a fixed schedule. To use this feature,\npass `every=# of seconds` to the event definition. This will run the event every given number of seconds!\n\nThis can be used to:\n\n- Create live visualizations that show the most up to date data\n- Refresh the state of the frontend automatically in response to changes in the backend\n\nHere is an example of a live plot that refreshes every half second:\n\n```python\nimport math\nimport gradio as gr\nimport plotly.express as px\nimport numpy as np\n\n\nplot_end = 2 * math.pi\n\n\ndef get_plot(period=1):\n global plot_end\n x = np.arange(plot_end - 2 * math.pi, plot_end, 0.02)\n y = np.sin(2*math.pi*period * x)\n fig = px.line(x=x, y=y)\n plot_end += 2 * math.pi\n return fig\n\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n gr.Markdown(\"Change the value of the slider to automatically update the plot\")\n period = gr.Slider(label=\"Period of plot\", value=1, minimum=0, maximum=10, step=1)\n plot = gr.Plot(label=\"Plot (updates every half second)\")\n\n dep = demo.load(get_plot, None, plot, every=0.5)\n period.change(get_plot, period, plot, every=0.5, cancels=[dep])\n\ndemo.queue().launch()\n```\n\n![live_demo](https://user-images.githubusercontent.com/41651716/198357377-633ce460-4e31-47bd-8202-1440cdd6fe19.gif)\n\n#\n\n### Documentation Changes:\n\n- Explained how to set up `queue` and `auth` when working with reload mode by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3089](https://github.com/gradio-app/gradio/pull/3089)\n\n#\n\n#\n\n### Full Changelog:\n\n- Allows loading private Spaces by passing an `api_key` to `gr.Interface.load()`\n by [@abidlabs](https://github.com/abidlabs) in [PR 2568](https://github.com/gradio-app/gradio/pull/2568)\n\n#\n\n## 3.8\n\n### New Features:\n\n- Allows event listeners to accept a single dictionary as their argument, where the keys are the components and the values are the component values. This is set by passing the input components in the event listener as a set instead of a list. [@aliabid94](https://github.com/aliabid94) in [PR 2550](https://github.com/gradio-app/gradio/pull/2550)\n\n### Bug Fixes:\n\n- Fix whitespace issue when using plotly. [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2548](https://github.com/gradio-app/gradio/pull/2548)\n- Apply appropriate alt text to all gallery images. [@camenduru](https://github.com/camenduru) in [PR 2358](https://github.com/gradio-app/gradio/pull/2538)\n- Removed erroneous tkinter import in gradio.blocks by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2555](https://github.com/gradio-app/gradio/pull/2555)\n\n#\n\n#\n\n#\n\n### Full Changelog:\n\n- Added the `every` keyword to event listeners that runs events on a fixed schedule by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2512](https://github.com/gradio-app/gradio/pull/2512)\n- Fix whitespace issue when using plotly. [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2548](https://github.com/gradio-app/gradio/pull/2548)\n- Apply appropriate alt text to all gallery images. 
[@camenduru](https://github.com/camenduru) in [PR 2358](https://github.com/gradio-app/gradio/pull/2538)\n\n#\n\n## 3.7\n\n### New Features:\n\n###### Batched Functions\n\nGradio now supports the ability to pass _batched_ functions. Batched functions are just\nfunctions which take in a list of inputs and return a list of predictions.\n\nFor example, here is a batched function that takes in two lists of inputs (a list of\nwords and a list of ints), and returns a list of trimmed words as output:\n\n```py\nimport time\n\ndef trim_words(words, lens):\n trimmed_words = []\n time.sleep(5)\n for w, l in zip(words, lens):\n trimmed_words.append(w[:l])\n return [trimmed_words]\n```\n\nThe advantage of using batched functions is that if you enable queuing, the Gradio\nserver can automatically _batch_ incoming requests and process them in parallel,\npotentially speeding up your demo. Here's what the Gradio code looks like (notice\nthe `batch=True` and `max_batch_size=16` -- both of these parameters can be passed\ninto event triggers or into the `Interface` class)\n\n```py\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n with gr.Row():\n word = gr.Textbox(label=\"word\", value=\"abc\")\n leng = gr.Number(label=\"leng\", precision=0, value=1)\n output = gr.Textbox(label=\"Output\")\n with gr.Row():\n run = gr.Button()\n\n event = run.click(trim_words, [word, leng], output, batch=True, max_batch_size=16)\n\ndemo.queue()\ndemo.launch()\n```\n\nIn the example above, 16 requests could be processed in parallel (for a total inference\ntime of 5 seconds), instead of each request being processed separately (for a total\ninference time of 80 seconds).\n\n###### Upload Event\n\n`Video`, `Audio`, `Image`, and `File` components now support a `upload()` event that is triggered when a user uploads a file into any of these components.\n\nExample usage:\n\n```py\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n with gr.Row():\n input_video = gr.Video()\n output_video = gr.Video()\n\n # Clears the output video when an input video is uploaded\n input_video.upload(lambda : None, None, output_video)\n```\n\n### Bug Fixes:\n\n- Fixes issue where plotly animations, interactivity, titles, legends, were not working properly. [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2486](https://github.com/gradio-app/gradio/pull/2486)\n- Prevent requests to the `/api` endpoint from skipping the queue if the queue is enabled for that event by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2493](https://github.com/gradio-app/gradio/pull/2493)\n- Fixes a bug with `cancels` in event triggers so that it works properly if multiple\n Blocks are rendered by [@abidlabs](https://github.com/abidlabs) in [PR 2530](https://github.com/gradio-app/gradio/pull/2530)\n- Prevent invalid targets of events from crashing the whole application. 
[@pngwn](https://github.com/pngwn) in [PR 2534](https://github.com/gradio-app/gradio/pull/2534)\n- Properly dequeue cancelled events when multiple apps are rendered by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2540](https://github.com/gradio-app/gradio/pull/2540)\n- Fixes videos being cropped due to height/width params not being used [@hannahblair](https://github.com/hannahblair) in [PR 4946](https://github.com/gradio-app/gradio/pull/4946)\n\n### Documentation Changes:\n\n- Added an example interactive dashboard to the \"Tabular & Plots\" section of the Demos page by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2508](https://github.com/gradio-app/gradio/pull/2508)\n\n#\n\n#\n\n### Full Changelog:\n\n- Fixes the error message if a user builds Gradio locally and tries to use `share=True` by [@abidlabs](https://github.com/abidlabs) in [PR 2502](https://github.com/gradio-app/gradio/pull/2502)\n- Allows the render() function to return self by [@Raul9595](https://github.com/Raul9595) in [PR 2514](https://github.com/gradio-app/gradio/pull/2514)\n- Fixes issue where plotly animations, interactivity, titles, legends, were not working properly. [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2486](https://github.com/gradio-app/gradio/pull/2486)\n- Gradio now supports batched functions by [@abidlabs](https://github.com/abidlabs) in [PR 2218](https://github.com/gradio-app/gradio/pull/2218)\n- Add `upload` event for `Video`, `Audio`, `Image`, and `File` components [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2448](https://github.com/gradio-app/gradio/pull/2456)\n- Changes websocket path for Spaces as it is no longer necessary to have a different URL for websocket connections on Spaces by [@abidlabs](https://github.com/abidlabs) in [PR 2528](https://github.com/gradio-app/gradio/pull/2528)\n- Clearer error message when events are defined outside of a Blocks scope, and a warning if you\n try to use `Series` or `Parallel` with `Blocks` by [@abidlabs](https://github.com/abidlabs) in [PR 2543](https://github.com/gradio-app/gradio/pull/2543)\n- Adds support for audio samples that are in `float64`, `float16`, or `uint16` formats by [@abidlabs](https://github.com/abidlabs) in [PR 2545](https://github.com/gradio-app/gradio/pull/2545)\n\n#\n\n## 3.6\n\n### New Features:\n\n###### Cancelling Running Events\n\nRunning events can be cancelled when other events are triggered! 
To test this feature, pass the `cancels` parameter to the event listener.\nFor this feature to work, the queue must be enabled.\n\n![cancel_on_change_rl](https://user-images.githubusercontent.com/41651716/195952623-61a606bd-e82b-4e1a-802e-223154cb8727.gif)\n\nCode:\n\n```python\nimport time\nimport gradio as gr\n\ndef fake_diffusion(steps):\n for i in range(steps):\n time.sleep(1)\n yield str(i)\n\ndef long_prediction(*args, **kwargs):\n time.sleep(10)\n return 42\n\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n n = gr.Slider(1, 10, value=9, step=1, label=\"Number Steps\")\n run = gr.Button()\n output = gr.Textbox(label=\"Iterative Output\")\n stop = gr.Button(value=\"Stop Iterating\")\n with gr.Column():\n prediction = gr.Number(label=\"Expensive Calculation\")\n run_pred = gr.Button(value=\"Run Expensive Calculation\")\n with gr.Column():\n cancel_on_change = gr.Textbox(label=\"Cancel Iteration and Expensive Calculation on Change\")\n\n click_event = run.click(fake_diffusion, n, output)\n stop.click(fn=None, inputs=None, outputs=None, cancels=[click_event])\n pred_event = run_pred.click(fn=long_prediction, inputs=None, outputs=prediction)\n\n cancel_on_change.change(None, None, None, cancels=[click_event, pred_event])\n\n\ndemo.queue(concurrency_count=1, max_size=20).launch()\n```\n\nFor interfaces, a stop button will be added automatically if the function uses a `yield` statement.\n\n```python\nimport gradio as gr\nimport time\n\ndef iteration(steps):\n for i in range(steps):\n time.sleep(0.5)\n yield i\n\ngr.Interface(iteration,\n inputs=gr.Slider(minimum=1, maximum=10, step=1, value=5),\n outputs=gr.Number()).queue().launch()\n```\n\n![stop_interface_rl](https://user-images.githubusercontent.com/41651716/195952883-e7ca4235-aae3-4852-8f28-96d01d0c5822.gif)\n\n### Bug Fixes:\n\n- Add loading status tracker UI to HTML and Markdown components. [@pngwn](https://github.com/pngwn) in [PR 2474](https://github.com/gradio-app/gradio/pull/2474)\n- Fixed videos being mirrored in the front-end if source is not webcam by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2475](https://github.com/gradio-app/gradio/pull/2475)\n- Add clear button for timeseries component [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2487](https://github.com/gradio-app/gradio/pull/2487)\n- Removes special characters from temporary filenames so that the files can be served by components [@abidlabs](https://github.com/abidlabs) in [PR 2480](https://github.com/gradio-app/gradio/pull/2480)\n- Fixed infinite reload loop when mounting gradio as a sub application by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2477](https://github.com/gradio-app/gradio/pull/2477)\n\n### Documentation Changes:\n\n- Adds a demo to show how a sound alert can be played upon completion of a prediction by [@abidlabs](https://github.com/abidlabs) in [PR 2478](https://github.com/gradio-app/gradio/pull/2478)\n\n#\n\n#\n\n### Full Changelog:\n\n- Enable running events to be cancelled from other events by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2433](https://github.com/gradio-app/gradio/pull/2433)\n- Small fix for version check before reuploading demos by [@aliabd](https://github.com/aliabd) in [PR 2469](https://github.com/gradio-app/gradio/pull/2469)\n- Add loading status tracker UI to HTML and Markdown components. 
[@pngwn](https://github.com/pngwn) in [PR 2474](https://github.com/gradio-app/gradio/pull/2474)\n- Add clear button for timeseries component [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2487](https://github.com/gradio-app/gradio/pull/2487)\n\n#\n\n## 3.5\n\n### Bug Fixes:\n\n- Ensure that Gradio does not take control of the HTML page title when embedding a gradio app as a web component; this behaviour can be flipped by adding `control_page_title=\"true\"` to the web component. [@pngwn](https://github.com/pngwn) in [PR 2400](https://github.com/gradio-app/gradio/pull/2400)\n- Decreased latency in iterative-output demos by making the iteration asynchronous by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2409](https://github.com/gradio-app/gradio/pull/2409)\n- Fixed queue getting stuck under very high load by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2374](https://github.com/gradio-app/gradio/pull/2374)\n- Ensure that components always behave as if `interactive=True` were set when the following conditions are true:\n\n - no default value is provided,\n - they are not set as the input or output of an event,\n - `interactive` kwarg is not set.\n\n [@pngwn](https://github.com/pngwn) in [PR 2459](https://github.com/gradio-app/gradio/pull/2459)\n\n### New Features:\n\n- When an `Image` component is set to `source=\"upload\"`, it is now possible to drag and drop an image to replace a previously uploaded image by [@pngwn](https://github.com/pngwn) in [PR 1711](https://github.com/gradio-app/gradio/issues/1711)\n- The `gr.Dataset` component now accepts `HTML` and `Markdown` components by [@abidlabs](https://github.com/abidlabs) in [PR 2437](https://github.com/gradio-app/gradio/pull/2437)\n\n### Documentation Changes:\n\n- Improved documentation for the `gr.Dataset` component by [@abidlabs](https://github.com/abidlabs) in [PR 2437](https://github.com/gradio-app/gradio/pull/2437)\n\n#\n\n### Breaking Changes:\n\n- The `Carousel` component is officially deprecated. Since gradio 3.0, code containing the `Carousel` component would throw warnings. 
As of the next release, the `Carousel` component will raise an exception.\n\n### Full Changelog:\n\n- Speeds up Gallery component by using temporary files instead of base64 representation in the front-end by [@proxyphi](https://github.com/proxyphi), [@pngwn](https://github.com/pngwn), and [@abidlabs](https://github.com/abidlabs) in [PR 2265](https://github.com/gradio-app/gradio/pull/2265)\n- Fixed some embedded demos in the guides by not loading the gradio web component in some guides by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2403](https://github.com/gradio-app/gradio/pull/2403)\n- When an `Image` component is set to `source=\"upload\"`, it is now possible to drag and drop and image to replace a previously uploaded image by [@pngwn](https://github.com/pngwn) in [PR 2400](https://github.com/gradio-app/gradio/pull/2410)\n- Improve documentation of the `Blocks.load()` event by [@abidlabs](https://github.com/abidlabs) in [PR 2413](https://github.com/gradio-app/gradio/pull/2413)\n- Decreased latency in iterative-output demos by making the iteration asynchronous [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2409](https://github.com/gradio-app/gradio/pull/2409)\n- Updated share link message to reference new Spaces Hardware [@abidlabs](https://github.com/abidlabs) in [PR 2423](https://github.com/gradio-app/gradio/pull/2423)\n- Automatically restart spaces if they're down by [@aliabd](https://github.com/aliabd) in [PR 2405](https://github.com/gradio-app/gradio/pull/2405)\n- Carousel component is now deprecated by [@abidlabs](https://github.com/abidlabs) in [PR 2434](https://github.com/gradio-app/gradio/pull/2434)\n- Build Gradio from source in ui tests by by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2440](https://github.com/gradio-app/gradio/pull/2440)\n- Change \"return ValueError\" to \"raise ValueError\" by [@vzakharov](https://github.com/vzakharov) in [PR 2445](https://github.com/gradio-app/gradio/pull/2445)\n- Add guide on creating a map demo using the `gr.Plot()` component [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2402](https://github.com/gradio-app/gradio/pull/2402)\n- Add blur event for `Textbox` and `Number` components [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2448](https://github.com/gradio-app/gradio/pull/2448)\n- Stops a gradio launch from hogging a port even after it's been killed [@aliabid94](https://github.com/aliabid94) in [PR 2453](https://github.com/gradio-app/gradio/pull/2453)\n- Fix embedded interfaces on touch screen devices by [@aliabd](https://github.com/aliabd) in [PR 2457](https://github.com/gradio-app/gradio/pull/2457)\n- Upload all demos to spaces by [@aliabd](https://github.com/aliabd) in [PR 2281](https://github.com/gradio-app/gradio/pull/2281)\n\n#\n\n## 3.4.1\n\n### New Features:\n\n###### 1. See Past and Upcoming Changes in the Release History \ud83d\udc40\n\nYou can now see gradio's release history directly on the website, and also keep track of upcoming changes. Just go [here](https://gradio.app/changelog/).\n\n![release-history](https://user-images.githubusercontent.com/9021060/193145458-3de699f7-7620-45de-aa73-a1c1b9b96257.gif)\n\n### Bug Fixes:\n\n1. Fix typo in guide image path by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2357](https://github.com/gradio-app/gradio/pull/2357)\n2. Raise error if Blocks has duplicate component with same IDs by [@abidlabs](https://github.com/abidlabs) in [PR 2359](https://github.com/gradio-app/gradio/pull/2359)\n3. 
Catch the permission exception on the audio component by [@Ian-GL](https://github.com/Ian-GL) in [PR 2330](https://github.com/gradio-app/gradio/pull/2330)\n4. Fix image_classifier_interface_load demo by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2365](https://github.com/gradio-app/gradio/pull/2365)\n5. Fix combining adjacent components without gaps by introducing `gr.Row(variant=\"compact\")` by [@aliabid94](https://github.com/aliabid94) in [PR 2291](https://github.com/gradio-app/gradio/pull/2291) This comes with deprecation of the following arguments for `Component.style`: `round`, `margin`, `border`.\n6. Fix audio streaming, which was previously choppy in [PR 2351](https://github.com/gradio-app/gradio/pull/2351). Big thanks to [@yannickfunk](https://github.com/yannickfunk) for the proposed solution.\n7. Fix bug where new typeable slider doesn't respect the minimum and maximum values [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2380](https://github.com/gradio-app/gradio/pull/2380)\n\n### Documentation Changes:\n\n1. New Guide: Connecting to a Database \ud83d\uddc4\ufe0f\n\n A new guide by [@freddyaboulton](https://github.com/freddyaboulton) that explains how you can use Gradio to connect your app to a database. Read more [here](https://gradio.app/connecting_to_a_database/).\n\n2. New Guide: Running Background Tasks \ud83e\udd77\n\n A new guide by [@freddyaboulton](https://github.com/freddyaboulton) that explains how you can run background tasks from your gradio app. Read more [here](https://gradio.app/running_background_tasks/).\n\n3. Small fixes to docs for `Image` component by [@abidlabs](https://github.com/abidlabs) in [PR 2372](https://github.com/gradio-app/gradio/pull/2372)\n\n#\n\n#\n\n### Full Changelog:\n\n- Create a guide on how to connect an app to a database hosted on the cloud by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2341](https://github.com/gradio-app/gradio/pull/2341)\n- Removes `analytics` dependency by [@abidlabs](https://github.com/abidlabs) in [PR 2347](https://github.com/gradio-app/gradio/pull/2347)\n- Add guide on launching background tasks from your app by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2350](https://github.com/gradio-app/gradio/pull/2350)\n- Fix typo in guide image path by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2357](https://github.com/gradio-app/gradio/pull/2357)\n- Raise error if Blocks has duplicate component with same IDs by [@abidlabs](https://github.com/abidlabs) in [PR 2359](https://github.com/gradio-app/gradio/pull/2359)\n- Hotfix: fix version back to 3.4 by [@abidlabs](https://github.com/abidlabs) in [PR 2361](https://github.com/gradio-app/gradio/pull/2361)\n- Change version.txt to 3.4 instead of 3.4.0 by [@aliabd](https://github.com/aliabd) in [PR 2363](https://github.com/gradio-app/gradio/pull/2363)\n- Catch the permission exception on the audio component by [@Ian-GL](https://github.com/Ian-GL) in [PR 2330](https://github.com/gradio-app/gradio/pull/2330)\n- Fix image_classifier_interface_load demo by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2365](https://github.com/gradio-app/gradio/pull/2365)\n- Small fixes to docs for `Image` component by [@abidlabs](https://github.com/abidlabs) in [PR 2372](https://github.com/gradio-app/gradio/pull/2372)\n- Automated Release Notes by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2306](https://github.com/gradio-app/gradio/pull/2306)\n- Fixed small typos in the docs 
[@julien-c](https://github.com/julien-c) in [PR 2373](https://github.com/gradio-app/gradio/pull/2373)\n- Adds ability to disable pre/post-processing for examples [@abidlabs](https://github.com/abidlabs) in [PR 2383](https://github.com/gradio-app/gradio/pull/2383)\n- Copy changelog file in website docker by [@aliabd](https://github.com/aliabd) in [PR 2384](https://github.com/gradio-app/gradio/pull/2384)\n- Lets users provide a `gr.update()` dictionary even if post-processing is disabled [@abidlabs](https://github.com/abidlabs) in [PR 2385](https://github.com/gradio-app/gradio/pull/2385)\n- Fix bug where errors would cause apps run in reload mode to hang forever by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2394](https://github.com/gradio-app/gradio/pull/2394)\n- Fix bug where new typeable slider doesn't respect the minimum and maximum values [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2380](https://github.com/gradio-app/gradio/pull/2380)\n\n#\n\n## 3.4\n\n### New Features:\n\n###### 1. Gallery Captions \ud83d\uddbc\ufe0f\n\nYou can now pass captions to images in the Gallery component. To do so you need to pass a {List} of (image, {str} caption) tuples. This is optional and the component also accepts just a list of the images.\n\nHere's an example:\n\n```python\nimport gradio as gr\n\nimages_with_captions = [\n (\"https://images.unsplash.com/photo-1551969014-7d2c4cddf0b6\", \"Cheetah by David Groves\"),\n (\"https://images.unsplash.com/photo-1546182990-dffeafbe841d\", \"Lion by Francesco\"),\n (\"https://images.unsplash.com/photo-1561731216-c3a4d99437d5\", \"Tiger by Mike Marrah\")\n ]\n\nwith gr.Blocks() as demo:\n gr.Gallery(value=images_with_captions)\n\ndemo.launch()\n```\n\n\"gallery_captions\"\n\n###### 2. Type Values into the Slider \ud83d\udd22\n\nYou can now type values directly on the Slider component! Here's what it looks like:\n\n![type-slider](https://user-images.githubusercontent.com/9021060/192399877-76b662a1-fede-4417-a932-fc15f0da7360.gif)\n\n###### 3. Better Sketching and Inpainting \ud83c\udfa8\n\nWe've made a lot of changes to our Image component so that it can support better sketching and inpainting.\n\nNow supports:\n\n- A standalone black-and-white sketch\n\n```python\nimport gradio as gr\ndemo = gr.Interface(lambda x: x, gr.Sketchpad(), gr.Image())\ndemo.launch()\n```\n\n![bw](https://user-images.githubusercontent.com/9021060/192410264-b08632b5-7b2a-4f86-afb0-5760e7b474cf.gif)\n\n- A standalone color sketch\n\n```python\nimport gradio as gr\ndemo = gr.Interface(lambda x: x, gr.Paint(), gr.Image())\ndemo.launch()\n```\n\n![color-sketch](https://user-images.githubusercontent.com/9021060/192410500-3c8c3e64-a5fd-4df2-a991-f0a5cef93728.gif)\n\n- An uploadable image with black-and-white or color sketching\n\n```python\nimport gradio as gr\ndemo = gr.Interface(lambda x: x, gr.Image(source='upload', tool='color-sketch'), gr.Image()) # for black and white, tool = 'sketch'\ndemo.launch()\n```\n\n![sketch-new](https://user-images.githubusercontent.com/9021060/192402422-e53cb7b6-024e-448c-87eb-d6a35a63c476.gif)\n\n- Webcam with black-and-white or color sketching\n\n```python\nimport gradio as gr\ndemo = gr.Interface(lambda x: x, gr.Image(source='webcam', tool='color-sketch'), gr.Image()) # for black and white, tool = 'sketch'\ndemo.launch()\n```\n\n![webcam-sketch](https://user-images.githubusercontent.com/9021060/192410820-0ffaf324-776e-4e1f-9de6-0fdbbf4940fa.gif)\n\nAs well as other fixes\n\n### Bug Fixes:\n\n1. 
Fix bug where max concurrency count is not respected in queue by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2286](https://github.com/gradio-app/gradio/pull/2286)\n2. fix : queue could be blocked by [@SkyTNT](https://github.com/SkyTNT) in [PR 2288](https://github.com/gradio-app/gradio/pull/2288)\n3. Supports `gr.update()` in example caching by [@abidlabs](https://github.com/abidlabs) in [PR 2309](https://github.com/gradio-app/gradio/pull/2309)\n4. Clipboard fix for iframes by [@abidlabs](https://github.com/abidlabs) in [PR 2321](https://github.com/gradio-app/gradio/pull/2321)\n5. Fix: Dataframe column headers are reset when you add a new column by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2318](https://github.com/gradio-app/gradio/pull/2318)\n6. Added support for URLs for Video, Audio, and Image by [@abidlabs](https://github.com/abidlabs) in [PR 2256](https://github.com/gradio-app/gradio/pull/2256)\n7. Add documentation about how to create and use the Gradio FastAPI app by [@abidlabs](https://github.com/abidlabs) in [PR 2263](https://github.com/gradio-app/gradio/pull/2263)\n\n### Documentation Changes:\n\n1. Adding a Playground Tab to the Website by [@aliabd](https://github.com/aliabd) in [PR 1860](https://github.com/gradio-app/gradio/pull/1860)\n2. Gradio for Tabular Data Science Workflows Guide by [@merveenoyan](https://github.com/merveenoyan) in [PR 2199](https://github.com/gradio-app/gradio/pull/2199)\n3. Promotes `postprocess` and `preprocess` to documented parameters by [@abidlabs](https://github.com/abidlabs) in [PR 2293](https://github.com/gradio-app/gradio/pull/2293)\n4. Update 2)key_features.md by [@voidxd](https://github.com/voidxd) in [PR 2326](https://github.com/gradio-app/gradio/pull/2326)\n5. Add docs to blocks context postprocessing function by [@Ian-GL](https://github.com/Ian-GL) in [PR 2332](https://github.com/gradio-app/gradio/pull/2332)\n\n### Testing and Infrastructure Changes\n\n1. Website fixes and refactoring by [@aliabd](https://github.com/aliabd) in [PR 2280](https://github.com/gradio-app/gradio/pull/2280)\n2. 
Don't deploy to spaces on release by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2313](https://github.com/gradio-app/gradio/pull/2313)\n\n### Full Changelog:\n\n- Website fixes and refactoring by [@aliabd](https://github.com/aliabd) in [PR 2280](https://github.com/gradio-app/gradio/pull/2280)\n- Fix bug where max concurrency count is not respected in queue by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2286](https://github.com/gradio-app/gradio/pull/2286)\n- Promotes `postprocess` and `preprocess` to documented parameters by [@abidlabs](https://github.com/abidlabs) in [PR 2293](https://github.com/gradio-app/gradio/pull/2293)\n- Raise warning when trying to cache examples but not all inputs have examples by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2279](https://github.com/gradio-app/gradio/pull/2279)\n- fix : queue could be blocked by [@SkyTNT](https://github.com/SkyTNT) in [PR 2288](https://github.com/gradio-app/gradio/pull/2288)\n- Don't deploy to spaces on release by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2313](https://github.com/gradio-app/gradio/pull/2313)\n- Supports `gr.update()` in example caching by [@abidlabs](https://github.com/abidlabs) in [PR 2309](https://github.com/gradio-app/gradio/pull/2309)\n- Respect Upstream Queue when loading interfaces/blocks from Spaces by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2294](https://github.com/gradio-app/gradio/pull/2294)\n- Clipboard fix for iframes by [@abidlabs](https://github.com/abidlabs) in [PR 2321](https://github.com/gradio-app/gradio/pull/2321)\n- Sketching + Inpainting Capabilities to Gradio by [@abidlabs](https://github.com/abidlabs) in [PR 2144](https://github.com/gradio-app/gradio/pull/2144)\n- Update 2)key_features.md by [@voidxd](https://github.com/voidxd) in [PR 2326](https://github.com/gradio-app/gradio/pull/2326)\n- release 3.4b3 by [@abidlabs](https://github.com/abidlabs) in [PR 2328](https://github.com/gradio-app/gradio/pull/2328)\n- Fix: Dataframe column headers are reset when you add a new column by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2318](https://github.com/gradio-app/gradio/pull/2318)\n- Start queue when gradio is a sub application by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2319](https://github.com/gradio-app/gradio/pull/2319)\n- Fix Web Tracker Script by [@aliabd](https://github.com/aliabd) in [PR 2308](https://github.com/gradio-app/gradio/pull/2308)\n- Add docs to blocks context postprocessing function by [@Ian-GL](https://github.com/Ian-GL) in [PR 2332](https://github.com/gradio-app/gradio/pull/2332)\n- Fix typo in iterator variable name in run_predict function by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2340](https://github.com/gradio-app/gradio/pull/2340)\n- Add captions to galleries by [@aliabid94](https://github.com/aliabid94) in [PR 2284](https://github.com/gradio-app/gradio/pull/2284)\n- Typeable value on gradio.Slider by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2329](https://github.com/gradio-app/gradio/pull/2329)\n\n### Contributors Shoutout:\n\n- [@SkyTNT](https://github.com/SkyTNT) made their first contribution in [PR 2288](https://github.com/gradio-app/gradio/pull/2288)\n- [@voidxd](https://github.com/voidxd) made their first contribution in [PR 2326](https://github.com/gradio-app/gradio/pull/2326)\n\n## 3.3\n\n### New Features:\n\n###### 1. 
Iterative Outputs \u23f3\n\nYou can now create an iterative output simply by having your function return a generator!\n\nHere's (part of) an example that was used to generate the interface below it. [See full code](https://colab.research.google.com/drive/1m9bWS6B82CT7bw-m4L6AJR8za7fEK7Ov?usp=sharing).\n\n```python\ndef predict(steps, seed):\n generator = torch.manual_seed(seed)\n for i in range(1,steps):\n yield pipeline(generator=generator, num_inference_steps=i)[\"sample\"][0]\n```\n\n![example](https://user-images.githubusercontent.com/9021060/189086273-f5e7087d-71fa-4158-90a9-08e84da0421c.mp4)\n\n###### 2. Accordion Layout \ud83c\udd95\n\nThis version of Gradio introduces a new layout component to Blocks: the Accordion. Wrap your elements in a neat, expandable layout that allows users to toggle them as needed.\n\nUsage: ([Read the docs](https://gradio.app/docs/#accordion))\n\n```python\nwith gr.Accordion(\"open up\"):\n# components here\n```\n\n![accordion](https://user-images.githubusercontent.com/9021060/189088465-f0ffd7f0-fc6a-42dc-9249-11c5e1e0529b.gif)\n\n###### 3. Skops Integration \ud83d\udcc8\n\nOur new integration with [skops](https://huggingface.co/blog/skops) allows you to load tabular classification and regression models directly from the [hub](https://huggingface.co/models).\n\nHere's a classification example showing how quick it is to set up an interface for a [model](https://huggingface.co/scikit-learn/tabular-playground).\n\n```python\nimport gradio as gr\ngr.Interface.load(\"models/scikit-learn/tabular-playground\").launch()\n```\n\n![187936493-5c90c01d-a6dd-400f-aa42-833a096156a1](https://user-images.githubusercontent.com/9021060/189090519-328fbcb4-120b-43c8-aa54-d6fccfa6b7e8.png)\n\n#\n\n#\n\n#\n\n#\n\n### Full Changelog:\n\n- safari fixes by [@pngwn](https://github.com/pngwn) in [PR 2138](https://github.com/gradio-app/gradio/pull/2138)\n- Fix roundedness and form borders by [@aliabid94](https://github.com/aliabid94) in [PR 2147](https://github.com/gradio-app/gradio/pull/2147)\n- Better processing of example data prior to creating dataset component by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2147](https://github.com/gradio-app/gradio/pull/2147)\n- Show error on Connection drops by [@aliabid94](https://github.com/aliabid94) in [PR 2147](https://github.com/gradio-app/gradio/pull/2147)\n- 3.2 release! 
by [@abidlabs](https://github.com/abidlabs) in [PR 2139](https://github.com/gradio-app/gradio/pull/2139)\n- Fixed Named API Requests by [@abidlabs](https://github.com/abidlabs) in [PR 2151](https://github.com/gradio-app/gradio/pull/2151)\n- Quick Fix: Cannot upload Model3D image after clearing it by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2168](https://github.com/gradio-app/gradio/pull/2168)\n- Fixed misleading log when server_name is '0.0.0.0' by [@lamhoangtung](https://github.com/lamhoangtung) in [PR 2176](https://github.com/gradio-app/gradio/pull/2176)\n- Keep embedded PngInfo metadata by [@cobryan05](https://github.com/cobryan05) in [PR 2170](https://github.com/gradio-app/gradio/pull/2170)\n- Skops integration: Load tabular classification and regression models from the hub by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2126](https://github.com/gradio-app/gradio/pull/2126)\n- Respect original filename when cached example files are downloaded by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2145](https://github.com/gradio-app/gradio/pull/2145)\n- Add manual trigger to deploy to pypi by [@abidlabs](https://github.com/abidlabs) in [PR 2192](https://github.com/gradio-app/gradio/pull/2192)\n- Fix bugs with gr.update by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2157](https://github.com/gradio-app/gradio/pull/2157)\n- Make queue per app by [@aliabid94](https://github.com/aliabid94) in [PR 2193](https://github.com/gradio-app/gradio/pull/2193)\n- Preserve Labels In Interpretation Components by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2166](https://github.com/gradio-app/gradio/pull/2166)\n- Quick Fix: Multiple file download not working by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2169](https://github.com/gradio-app/gradio/pull/2169)\n- use correct MIME type for js-script file by [@daspartho](https://github.com/daspartho) in [PR 2200](https://github.com/gradio-app/gradio/pull/2200)\n- Add accordion component by [@aliabid94](https://github.com/aliabid94) in [PR 2208](https://github.com/gradio-app/gradio/pull/2208)\n\n### Contributors Shoutout:\n\n- [@lamhoangtung](https://github.com/lamhoangtung) made their first contribution in [PR 2176](https://github.com/gradio-app/gradio/pull/2176)\n- [@cobryan05](https://github.com/cobryan05) made their first contribution in [PR 2170](https://github.com/gradio-app/gradio/pull/2170)\n- [@daspartho](https://github.com/daspartho) made their first contribution in [PR 2200](https://github.com/gradio-app/gradio/pull/2200)\n\n## 3.2\n\n### New Features:\n\n###### 1. Improvements to Queuing \ud83e\udd47\n\nWe've implemented a brand new queuing system based on **web sockets** instead of HTTP long polling. Among other things, this allows us to manage queue sizes better on Hugging Face Spaces. There are also additional queue-related parameters you can add:\n\n- Now supports concurrent workers (parallelization)\n\n```python\ndemo = gr.Interface(...)\ndemo.queue(concurrency_count=3)\ndemo.launch()\n```\n\n- Configure a maximum queue size\n\n```python\ndemo = gr.Interface(...)\ndemo.queue(max_size=100)\ndemo.launch()\n```\n\n- If a user closes their tab / browser, they leave the queue, which means the demo will run faster for everyone else\n\n###### 2. 
Fixes to Examples\n\n- Dataframe examples will render properly, and look much clearer in the UI: (thanks to PR #2125)\n\n![Screen Shot 2022-08-30 at 8 29 58 PM](https://user-images.githubusercontent.com/9021060/187586561-d915bafb-f968-4966-b9a2-ef41119692b2.png)\n\n- Image and Video thumbnails are cropped to look neater and more uniform: (thanks to PR #2109)\n\n![Screen Shot 2022-08-30 at 8 32 15 PM](https://user-images.githubusercontent.com/9021060/187586890-56e1e4f0-1b84-42d9-a82f-911772c41030.png)\n\n- Other fixes in PR #2131 and #2064 make it easier to design and use Examples\n\n###### 3. Component Fixes \ud83e\uddf1\n\n- Specify the width and height of an image in its style tag (thanks to PR #2133)\n\n```python\ncomponents.Image().style(height=260, width=300)\n```\n\n- Automatic conversion of videos so they are playable in the browser (thanks to PR #2003). Gradio will check if a video's format is playable in the browser and, if it isn't, will automatically convert it to a format that is (mp4).\n- Pass in a json filepath to the Label component (thanks to PR #2083)\n- Randomize the default value of a Slider (thanks to PR #1935)\n\n![slider-random](https://user-images.githubusercontent.com/9021060/187596230-3db9697f-9f4d-42f5-9387-d77573513448.gif)\n\n- Improvements to State in PR #2100\n\n###### 4. Ability to Randomize Input Sliders and Reload Data whenever the Page Loads\n\n- In some cases, you want to be able to show a different set of input data to every user as they load the page app. For example, you might want to randomize the value of a \"seed\" `Slider` input. Or you might want to show a `Textbox` with the current date. We now support passing _functions_ as the default value in input components. When you pass in a function, it gets **re-evaluated** every time someone loads the demo, allowing you to reload / change data for different users.\n\nHere's an example loading the current date time into an input Textbox:\n\n```python\nimport gradio as gr\nimport datetime\n\nwith gr.Blocks() as demo:\n gr.Textbox(datetime.datetime.now)\n\ndemo.launch()\n```\n\nNote that we don't evaluate the function -- `datetime.datetime.now()` -- we pass in the function itself to get this behavior -- `datetime.datetime.now`\n\nBecause randomizing the initial value of `Slider` is a common use case, we've added a `randomize` keyword argument you can use to randomize its initial value:\n\n```python\nimport gradio as gr\ndemo = gr.Interface(lambda x:x, gr.Slider(0, 10, randomize=True), \"number\")\ndemo.launch()\n```\n\n###### 5. 
New Guide \ud83d\udd8a\ufe0f\n\n- [Gradio and W&B Integration](https://gradio.app/Gradio_and_Wandb_Integration/)\n\n### Full Changelog:\n\n- Reset components to original state by setting value to None by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2044](https://github.com/gradio-app/gradio/pull/2044)\n- Cleaning up the way data is processed for components by [@abidlabs](https://github.com/abidlabs) in [PR 1967](https://github.com/gradio-app/gradio/pull/1967)\n- version 3.1.8b by [@abidlabs](https://github.com/abidlabs) in [PR 2063](https://github.com/gradio-app/gradio/pull/2063)\n- Wandb guide by [@AK391](https://github.com/AK391) in [PR 1898](https://github.com/gradio-app/gradio/pull/1898)\n- Add a flagging callback to save json files to a hugging face dataset by [@chrisemezue](https://github.com/chrisemezue) in [PR 1821](https://github.com/gradio-app/gradio/pull/1821)\n- Add data science demos to landing page by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2067](https://github.com/gradio-app/gradio/pull/2067)\n- Hide time series + xgboost demos by default by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2079](https://github.com/gradio-app/gradio/pull/2079)\n- Encourage people to keep trying when queue full by [@apolinario](https://github.com/apolinario) in [PR 2076](https://github.com/gradio-app/gradio/pull/2076)\n- Updated our analytics on creation of Blocks/Interface by [@abidlabs](https://github.com/abidlabs) in [PR 2082](https://github.com/gradio-app/gradio/pull/2082)\n- `Label` component now accepts file paths to `.json` files by [@abidlabs](https://github.com/abidlabs) in [PR 2083](https://github.com/gradio-app/gradio/pull/2083)\n- Fix issues related to demos in Spaces by [@abidlabs](https://github.com/abidlabs) in [PR 2086](https://github.com/gradio-app/gradio/pull/2086)\n- Fix TimeSeries examples not properly displayed in UI by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2064](https://github.com/gradio-app/gradio/pull/2064)\n- Fix infinite requests when doing tab item select by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2070](https://github.com/gradio-app/gradio/pull/2070)\n- Accept deprecated `file` route as well by [@abidlabs](https://github.com/abidlabs) in [PR 2099](https://github.com/gradio-app/gradio/pull/2099)\n- Allow frontend method execution on Block.load event by [@codedealer](https://github.com/codedealer) in [PR 2108](https://github.com/gradio-app/gradio/pull/2108)\n- Improvements to `State` by [@abidlabs](https://github.com/abidlabs) in [PR 2100](https://github.com/gradio-app/gradio/pull/2100)\n- Catch IndexError, KeyError in video_is_playable by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2113](https://github.com/gradio-app/gradio/pull/2113)\n- Fix: Download button does not respect the filepath returned by the function by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2073](https://github.com/gradio-app/gradio/pull/2073)\n- Refactoring Layout: Adding column widths, forms, and more. 
by [@aliabid94](https://github.com/aliabid94) in [PR 2097](https://github.com/gradio-app/gradio/pull/2097)\n- Update CONTRIBUTING.md by [@abidlabs](https://github.com/abidlabs) in [PR 2118](https://github.com/gradio-app/gradio/pull/2118)\n- 2092 df ex by [@pngwn](https://github.com/pngwn) in [PR 2125](https://github.com/gradio-app/gradio/pull/2125)\n- feat(samples table/gallery): Crop thumbs to square by [@ronvoluted](https://github.com/ronvoluted) in [PR 2109](https://github.com/gradio-app/gradio/pull/2109)\n- Some enhancements to `gr.Examples` by [@abidlabs](https://github.com/abidlabs) in [PR 2131](https://github.com/gradio-app/gradio/pull/2131)\n- Image size fix by [@aliabid94](https://github.com/aliabid94) in [PR 2133](https://github.com/gradio-app/gradio/pull/2133)\n\n### Contributors Shoutout:\n\n- [@chrisemezue](https://github.com/chrisemezue) made their first contribution in [PR 1821](https://github.com/gradio-app/gradio/pull/1821)\n- [@apolinario](https://github.com/apolinario) made their first contribution in [PR 2076](https://github.com/gradio-app/gradio/pull/2076)\n- [@codedealer](https://github.com/codedealer) made their first contribution in [PR 2108](https://github.com/gradio-app/gradio/pull/2108)\n\n## 3.1\n\n### New Features:\n\n###### 1. Embedding Demos on Any Website \ud83d\udcbb\n\nWith PR #1444, Gradio is now distributed as a web component. This means demos can be natively embedded on websites. You'll just need to add two lines: one to load the gradio javascript, and one to link to the demos backend.\n\nHere's a simple example that embeds the demo from a Hugging Face space:\n\n```html\n\n\n```\n\nBut you can also embed demos that are running anywhere, you just need to link the demo to `src` instead of `space`. In fact, all the demos on the gradio website are embedded this way:\n\n\"Screen\n\nRead more in the [Embedding Gradio Demos](https://gradio.app/embedding_gradio_demos) guide.\n\n###### 2. Reload Mode \ud83d\udc68\u200d\ud83d\udcbb\n\nReload mode helps developers create gradio demos faster by automatically reloading the demo whenever the code changes. It can support development on Python IDEs (VS Code, PyCharm, etc), the terminal, as well as Jupyter notebooks.\n\nIf your demo code is in a script named `app.py`, instead of running `python app.py` you can now run `gradio app.py` and that will launch the demo in reload mode:\n\n```bash\nLaunching in reload mode on: http://127.0.0.1:7860 (Press CTRL+C to quit)\nWatching...\nWARNING: The --reload flag should not be used in production on Windows.\n```\n\nIf you're working from a Jupyter or Colab Notebook, use these magic commands instead: `%load_ext gradio` when you import gradio, and `%%blocks` in the top of the cell with the demo code. Here's an example that shows how much faster the development becomes:\n\n![Blocks](https://user-images.githubusercontent.com/9021060/178986488-ed378cc8-5141-4330-ba41-672b676863d0.gif)\n\n###### 3. Inpainting Support on `gr.Image()` \ud83c\udfa8\n\nWe updated the Image component to add support for inpainting demos. It works by adding `tool=\"sketch\"` as a parameter, that passes both an image and a sketchable mask to your prediction function.\n\nHere's an example from the [LAMA space](https://huggingface.co/spaces/akhaliq/lama):\n\n![FXApVlFVsAALSD-](https://user-images.githubusercontent.com/9021060/178989479-549867c8-7fb0-436a-a97d-1e91c9f5e611.jpeg)\n\n###### 4. 
Markdown and HTML support in Dataframes \ud83d\udd22\n\nWe upgraded the Dataframe component in PR #1684 to support rendering Markdown and HTML inside the cells.\n\nThis means you can build Dataframes that look like the following:\n\n![image (8)](https://user-images.githubusercontent.com/9021060/178991233-41cb07a5-e7a3-433e-89b8-319bc78eb9c2.png)\n\n###### 5. `gr.Examples()` for Blocks \ud83e\uddf1\n\nWe've added the `gr.Examples` component helper to allow you to add examples to any Blocks demo. This class is a wrapper over the `gr.Dataset` component.\n\n\"Screen\n\ngr.Examples takes two required parameters:\n\n- `examples` which takes in a nested list\n- `inputs` which takes in a component or list of components\n\nYou can read more in the [Examples docs](https://gradio.app/docs/#examples) or the [Adding Examples to your Demos guide](https://gradio.app/adding_examples_to_your_app/).\n\n###### 6. Fixes to Audio Streaming\n\nWith [PR 1828](https://github.com/gradio-app/gradio/pull/1828) we now hide the status loading animation, as well as remove the echo in streaming. Check out the [stream_audio](https://github.com/gradio-app/gradio/blob/main/demo/stream_audio/run.py) demo for more or read through our [Real Time Speech Recognition](https://gradio.app/real_time_speech_recognition/) guide.\n\n\"Screen\n\n### Full Changelog:\n\n- File component: list multiple files and allow for download #1446 by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 1681](https://github.com/gradio-app/gradio/pull/1681)\n- Add ColorPicker to docs by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 1768](https://github.com/gradio-app/gradio/pull/1768)\n- Mock out requests in TestRequest unit tests by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 1794](https://github.com/gradio-app/gradio/pull/1794)\n- Add requirements.txt and test_files to source dist by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 1817](https://github.com/gradio-app/gradio/pull/1817)\n- refactor: f-string for tunneling.py by [@nhankiet](https://github.com/nhankiet) in [PR 1819](https://github.com/gradio-app/gradio/pull/1819)\n- Miscellaneous formatting improvements to website by [@aliabd](https://github.com/aliabd) in [PR 1754](https://github.com/gradio-app/gradio/pull/1754)\n- `integrate()` method moved to `Blocks` by [@abidlabs](https://github.com/abidlabs) in [PR 1776](https://github.com/gradio-app/gradio/pull/1776)\n- Add python-3.7 tests by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 1818](https://github.com/gradio-app/gradio/pull/1818)\n- Copy test dir in website dockers by [@aliabd](https://github.com/aliabd) in [PR 1827](https://github.com/gradio-app/gradio/pull/1827)\n- Add info to docs on how to set default values for components by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 1788](https://github.com/gradio-app/gradio/pull/1788)\n- Embedding Components on Docs by [@aliabd](https://github.com/aliabd) in [PR 1726](https://github.com/gradio-app/gradio/pull/1726)\n- Remove usage of deprecated gr.inputs and gr.outputs from website by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 1796](https://github.com/gradio-app/gradio/pull/1796)\n- Some cleanups to the docs page by [@abidlabs](https://github.com/abidlabs) in [PR 1822](https://github.com/gradio-app/gradio/pull/1822)\n\n### Contributors Shoutout:\n\n- [@nhankiet](https://github.com/nhankiet) made their first contribution in [PR 1819](https://github.com/gradio-app/gradio/pull/1819)\n\n## 3.0\n\n###### 
\ud83d\udd25 Gradio 3.0 is the biggest update to the library, ever.\n\n### New Features:\n\n###### 1. Blocks \ud83e\uddf1\n\nBlocks is a new, low-level API that allows you to have full control over the data flows and layout of your application. It allows you to build very complex, multi-step applications. For example, you might want to:\n\n- Group together related demos as multiple tabs in one web app\n- Change the layout of your demo instead of just having all of the inputs on the left and outputs on the right\n- Have multi-step interfaces, in which the output of one model becomes the input to the next model, or have more flexible data flows in general\n- Change a component's properties (for example, the choices in a Dropdown) or its visibility based on user input\n\nHere's a simple example that creates the demo below it:\n\n```python\nimport gradio as gr\n\ndef update(name):\n return f\"Welcome to Gradio, {name}!\"\n\ndemo = gr.Blocks()\n\nwith demo:\n gr.Markdown(\n \"\"\"\n # Hello World!\n Start typing below to see the output.\n \"\"\")\n inp = gr.Textbox(placeholder=\"What is your name?\")\n out = gr.Textbox()\n\n inp.change(fn=update,\n inputs=inp,\n outputs=out)\n\ndemo.launch()\n```\n\n![hello-blocks](https://user-images.githubusercontent.com/9021060/168684108-78cbd24b-e6bd-4a04-a8d9-20d535203434.gif)\n\nRead our [Introduction to Blocks](http://gradio.app/introduction_to_blocks/) guide for more, and join the \ud83c\udf88 [Gradio Blocks Party](https://huggingface.co/spaces/Gradio-Blocks/README)!\n\n###### 2. Our Revamped Design \ud83c\udfa8\n\nWe've upgraded our design across the entire library: from components, and layouts all the way to dark mode.\n\n![kitchen_sink](https://user-images.githubusercontent.com/9021060/168686333-7a6e3096-3e23-4309-abf2-5cd7736e0463.gif)\n\n###### 3. A New Website \ud83d\udcbb\n\nWe've upgraded [gradio.app](https://gradio.app) to make it cleaner, faster and easier to use. Our docs now come with components and demos embedded directly on the page. So you can quickly get up to speed with what you're looking for.\n\n![website](https://user-images.githubusercontent.com/9021060/168687191-10d6a3bd-101f-423a-8193-48f47a5e077d.gif)\n\n###### 4. New Components: Model3D, Dataset, and More..\n\nWe've introduced a lot of new components in `3.0`, including `Model3D`, `Dataset`, `Markdown`, `Button` and `Gallery`. 
You can find all the components and play around with them [here](https://gradio.app/docs/#components).\n\n![Model3d](https://user-images.githubusercontent.com/9021060/168689062-6ad77151-8cc5-467d-916c-f7c78e52ec0c.gif)\n\n### Full Changelog:\n\n- Gradio dash fe by [@pngwn](https://github.com/pngwn) in [PR 807](https://github.com/gradio-app/gradio/pull/807)\n- Blocks components by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 765](https://github.com/gradio-app/gradio/pull/765)\n- Blocks components V2 by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 843](https://github.com/gradio-app/gradio/pull/843)\n- Blocks-Backend-Events by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 844](https://github.com/gradio-app/gradio/pull/844)\n- Interfaces from Blocks by [@aliabid94](https://github.com/aliabid94) in [PR 849](https://github.com/gradio-app/gradio/pull/849)\n- Blocks dev by [@aliabid94](https://github.com/aliabid94) in [PR 853](https://github.com/gradio-app/gradio/pull/853)\n- Started updating demos to use the new `gradio.components` syntax by [@abidlabs](https://github.com/abidlabs) in [PR 848](https://github.com/gradio-app/gradio/pull/848)\n- add test infra + add browser tests to CI by [@pngwn](https://github.com/pngwn) in [PR 852](https://github.com/gradio-app/gradio/pull/852)\n- 854 textbox by [@pngwn](https://github.com/pngwn) in [PR 859](https://github.com/gradio-app/gradio/pull/859)\n- Getting old Python unit tests to pass on `blocks-dev` by [@abidlabs](https://github.com/abidlabs) in [PR 861](https://github.com/gradio-app/gradio/pull/861)\n- initialise chatbot with empty array of messages by [@pngwn](https://github.com/pngwn) in [PR 867](https://github.com/gradio-app/gradio/pull/867)\n- add test for output to input by [@pngwn](https://github.com/pngwn) in [PR 866](https://github.com/gradio-app/gradio/pull/866)\n- More Interface -> Blocks features by [@aliabid94](https://github.com/aliabid94) in [PR 864](https://github.com/gradio-app/gradio/pull/864)\n- Fixing external.py in blocks-dev to reflect the new HF Spaces paths by [@abidlabs](https://github.com/abidlabs) in [PR 879](https://github.com/gradio-app/gradio/pull/879)\n- backend_default_value_refactoring by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 871](https://github.com/gradio-app/gradio/pull/871)\n- fix default_value by [@pngwn](https://github.com/pngwn) in [PR 869](https://github.com/gradio-app/gradio/pull/869)\n- fix buttons by [@aliabid94](https://github.com/aliabid94) in [PR 883](https://github.com/gradio-app/gradio/pull/883)\n- Checking and updating more demos to use 3.0 syntax by [@abidlabs](https://github.com/abidlabs) in [PR 892](https://github.com/gradio-app/gradio/pull/892)\n- Blocks Tests by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 902](https://github.com/gradio-app/gradio/pull/902)\n- Interface fix by [@pngwn](https://github.com/pngwn) in [PR 901](https://github.com/gradio-app/gradio/pull/901)\n- Quick fix: Issue 893 by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 907](https://github.com/gradio-app/gradio/pull/907)\n- 3d Image Component by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 775](https://github.com/gradio-app/gradio/pull/775)\n- fix endpoint url in prod by [@pngwn](https://github.com/pngwn) in [PR 911](https://github.com/gradio-app/gradio/pull/911)\n- rename Model3d to Image3D by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 912](https://github.com/gradio-app/gradio/pull/912)\n- update pypi to 2.9.1 by 
[@abidlabs](https://github.com/abidlabs) in [PR 916](https://github.com/gradio-app/gradio/pull/916)\n- blocks-with-fix by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 917](https://github.com/gradio-app/gradio/pull/917)\n- Restore Interpretation, Live, Auth, Queueing by [@aliabid94](https://github.com/aliabid94) in [PR 915](https://github.com/gradio-app/gradio/pull/915)\n- Allow `Blocks` instances to be used like a `Block` in other `Blocks` by [@abidlabs](https://github.com/abidlabs) in [PR 919](https://github.com/gradio-app/gradio/pull/919)\n- Redesign 1 by [@pngwn](https://github.com/pngwn) in [PR 918](https://github.com/gradio-app/gradio/pull/918)\n- blocks-components-tests by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 904](https://github.com/gradio-app/gradio/pull/904)\n- fix unit + browser tests by [@pngwn](https://github.com/pngwn) in [PR 926](https://github.com/gradio-app/gradio/pull/926)\n- blocks-move-test-data by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 927](https://github.com/gradio-app/gradio/pull/927)\n- remove debounce from form inputs by [@pngwn](https://github.com/pngwn) in [PR 932](https://github.com/gradio-app/gradio/pull/932)\n- reimplement webcam video by [@pngwn](https://github.com/pngwn) in [PR 928](https://github.com/gradio-app/gradio/pull/928)\n- blocks-move-test-data by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 941](https://github.com/gradio-app/gradio/pull/941)\n- allow audio components to take a string value by [@pngwn](https://github.com/pngwn) in [PR 930](https://github.com/gradio-app/gradio/pull/930)\n- static mode for textbox by [@pngwn](https://github.com/pngwn) in [PR 929](https://github.com/gradio-app/gradio/pull/929)\n- fix file upload text by [@pngwn](https://github.com/pngwn) in [PR 931](https://github.com/gradio-app/gradio/pull/931)\n- tabbed-interface-rewritten by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 958](https://github.com/gradio-app/gradio/pull/958)\n- Gan demo fix by [@abidlabs](https://github.com/abidlabs) in [PR 965](https://github.com/gradio-app/gradio/pull/965)\n- Blocks analytics by [@abidlabs](https://github.com/abidlabs) in [PR 947](https://github.com/gradio-app/gradio/pull/947)\n- Blocks page load by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 963](https://github.com/gradio-app/gradio/pull/963)\n- add frontend for page load events by [@pngwn](https://github.com/pngwn) in [PR 967](https://github.com/gradio-app/gradio/pull/967)\n- fix i18n and some tweaks by [@pngwn](https://github.com/pngwn) in [PR 966](https://github.com/gradio-app/gradio/pull/966)\n- add jinja2 to reqs by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 969](https://github.com/gradio-app/gradio/pull/969)\n- Cleaning up `Launchable()` by [@abidlabs](https://github.com/abidlabs) in [PR 968](https://github.com/gradio-app/gradio/pull/968)\n- Fix #944 by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 971](https://github.com/gradio-app/gradio/pull/971)\n- New Blocks Demo: neural instrument cloning by [@abidlabs](https://github.com/abidlabs) in [PR 975](https://github.com/gradio-app/gradio/pull/975)\n- Add huggingface_hub client library by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 973](https://github.com/gradio-app/gradio/pull/973)\n- State and variables by [@aliabid94](https://github.com/aliabid94) in [PR 977](https://github.com/gradio-app/gradio/pull/977)\n- update-components by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 
986](https://github.com/gradio-app/gradio/pull/986)\n- ensure dataframe updates as expected by [@pngwn](https://github.com/pngwn) in [PR 981](https://github.com/gradio-app/gradio/pull/981)\n- test-guideline by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 990](https://github.com/gradio-app/gradio/pull/990)\n- Issue #785: add footer by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 972](https://github.com/gradio-app/gradio/pull/972)\n- indentation fix by [@abidlabs](https://github.com/abidlabs) in [PR 993](https://github.com/gradio-app/gradio/pull/993)\n- missing quote by [@aliabd](https://github.com/aliabd) in [PR 996](https://github.com/gradio-app/gradio/pull/996)\n- added interactive parameter to components by [@abidlabs](https://github.com/abidlabs) in [PR 992](https://github.com/gradio-app/gradio/pull/992)\n- custom-components by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 985](https://github.com/gradio-app/gradio/pull/985)\n- Refactor component shortcuts by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 995](https://github.com/gradio-app/gradio/pull/995)\n- Plot Component by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 805](https://github.com/gradio-app/gradio/pull/805)\n- updated PyPi version to 2.9.2 by [@abidlabs](https://github.com/abidlabs) in [PR 1002](https://github.com/gradio-app/gradio/pull/1002)\n- Release 2.9.3 by [@abidlabs](https://github.com/abidlabs) in [PR 1003](https://github.com/gradio-app/gradio/pull/1003)\n- Image3D Examples Fix by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 1001](https://github.com/gradio-app/gradio/pull/1001)\n- release 2.9.4 by [@abidlabs](https://github.com/abidlabs) in [PR 1006](https://github.com/gradio-app/gradio/pull/1006)\n- templates import hotfix by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1008](https://github.com/gradio-app/gradio/pull/1008)\n- Progress indicator bar by [@aliabid94](https://github.com/aliabid94) in [PR 997](https://github.com/gradio-app/gradio/pull/997)\n- Fixed image input for absolute path by [@JefferyChiang](https://github.com/JefferyChiang) in [PR 1004](https://github.com/gradio-app/gradio/pull/1004)\n- Model3D + Plot Components by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 1010](https://github.com/gradio-app/gradio/pull/1010)\n- Gradio Guides: Creating CryptoPunks with GANs by [@NimaBoscarino](https://github.com/NimaBoscarino) in [PR 1000](https://github.com/gradio-app/gradio/pull/1000)\n- [BIG PR] Gradio blocks & redesigned components by [@abidlabs](https://github.com/abidlabs) in [PR 880](https://github.com/gradio-app/gradio/pull/880)\n- fixed failing test on main by [@abidlabs](https://github.com/abidlabs) in [PR 1023](https://github.com/gradio-app/gradio/pull/1023)\n- Use smaller ASR model in external test by [@abidlabs](https://github.com/abidlabs) in [PR 1024](https://github.com/gradio-app/gradio/pull/1024)\n- updated PyPi version to 2.9.0b by [@abidlabs](https://github.com/abidlabs) in [PR 1026](https://github.com/gradio-app/gradio/pull/1026)\n- Fixing import issues so that the package successfully installs on colab notebooks by [@abidlabs](https://github.com/abidlabs) in [PR 1027](https://github.com/gradio-app/gradio/pull/1027)\n- Update website tracker slackbot by [@aliabd](https://github.com/aliabd) in [PR 1037](https://github.com/gradio-app/gradio/pull/1037)\n- textbox-autoheight by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1009](https://github.com/gradio-app/gradio/pull/1009)\n- Model3D Examples 
fixes by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 1035](https://github.com/gradio-app/gradio/pull/1035)\n- GAN Gradio Guide: Adjustments to iframe heights by [@NimaBoscarino](https://github.com/NimaBoscarino) in [PR 1042](https://github.com/gradio-app/gradio/pull/1042)\n- added better default labels to form components by [@abidlabs](https://github.com/abidlabs) in [PR 1040](https://github.com/gradio-app/gradio/pull/1040)\n- Slackbot web tracker fix by [@aliabd](https://github.com/aliabd) in [PR 1043](https://github.com/gradio-app/gradio/pull/1043)\n- Plot fixes by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 1044](https://github.com/gradio-app/gradio/pull/1044)\n- Small fixes to the demos by [@abidlabs](https://github.com/abidlabs) in [PR 1030](https://github.com/gradio-app/gradio/pull/1030)\n- fixing demo issue with website by [@aliabd](https://github.com/aliabd) in [PR 1047](https://github.com/gradio-app/gradio/pull/1047)\n- [hotfix] HighlightedText by [@aliabid94](https://github.com/aliabid94) in [PR 1046](https://github.com/gradio-app/gradio/pull/1046)\n- Update text by [@ronvoluted](https://github.com/ronvoluted) in [PR 1050](https://github.com/gradio-app/gradio/pull/1050)\n- Update CONTRIBUTING.md by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1052](https://github.com/gradio-app/gradio/pull/1052)\n- fix(ui): Increase contrast for footer by [@ronvoluted](https://github.com/ronvoluted) in [PR 1048](https://github.com/gradio-app/gradio/pull/1048)\n- UI design update by [@gary149](https://github.com/gary149) in [PR 1041](https://github.com/gradio-app/gradio/pull/1041)\n- updated PyPi version to 2.9.0b8 by [@abidlabs](https://github.com/abidlabs) in [PR 1059](https://github.com/gradio-app/gradio/pull/1059)\n- Running, testing, and fixing demos by [@abidlabs](https://github.com/abidlabs) in [PR 1060](https://github.com/gradio-app/gradio/pull/1060)\n- Form layout by [@pngwn](https://github.com/pngwn) in [PR 1054](https://github.com/gradio-app/gradio/pull/1054)\n- inputless-interfaces by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1038](https://github.com/gradio-app/gradio/pull/1038)\n- Update PULL_REQUEST_TEMPLATE.md by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1068](https://github.com/gradio-app/gradio/pull/1068)\n- Upgrading node memory to 4gb in website Docker by [@aliabd](https://github.com/aliabd) in [PR 1069](https://github.com/gradio-app/gradio/pull/1069)\n- Website reload error by [@aliabd](https://github.com/aliabd) in [PR 1079](https://github.com/gradio-app/gradio/pull/1079)\n- fixed favicon issue by [@abidlabs](https://github.com/abidlabs) in [PR 1064](https://github.com/gradio-app/gradio/pull/1064)\n- remove-queue-from-events by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1056](https://github.com/gradio-app/gradio/pull/1056)\n- Enable vertex colors for OBJs files by [@radames](https://github.com/radames) in [PR 1074](https://github.com/gradio-app/gradio/pull/1074)\n- Dark text by [@ronvoluted](https://github.com/ronvoluted) in [PR 1049](https://github.com/gradio-app/gradio/pull/1049)\n- Scroll to output by [@pngwn](https://github.com/pngwn) in [PR 1077](https://github.com/gradio-app/gradio/pull/1077)\n- Explicitly list pnpm version 6 in contributing guide by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 1085](https://github.com/gradio-app/gradio/pull/1085)\n- hotfix for encrypt issue by [@abidlabs](https://github.com/abidlabs) in [PR 1096](https://github.com/gradio-app/gradio/pull/1096)\n- 
Release 2.9b9 by [@abidlabs](https://github.com/abidlabs) in [PR 1098](https://github.com/gradio-app/gradio/pull/1098)\n- tweak node circleci settings by [@pngwn](https://github.com/pngwn) in [PR 1091](https://github.com/gradio-app/gradio/pull/1091)\n- Website Reload Error by [@aliabd](https://github.com/aliabd) in [PR 1099](https://github.com/gradio-app/gradio/pull/1099)\n- Website Reload: README in demos docker by [@aliabd](https://github.com/aliabd) in [PR 1100](https://github.com/gradio-app/gradio/pull/1100)\n- Flagging fixes by [@abidlabs](https://github.com/abidlabs) in [PR 1081](https://github.com/gradio-app/gradio/pull/1081)\n- Backend for optional labels by [@abidlabs](https://github.com/abidlabs) in [PR 1080](https://github.com/gradio-app/gradio/pull/1080)\n- Optional labels fe by [@pngwn](https://github.com/pngwn) in [PR 1105](https://github.com/gradio-app/gradio/pull/1105)\n- clean-deprecated-parameters by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1090](https://github.com/gradio-app/gradio/pull/1090)\n- Blocks rendering fix by [@abidlabs](https://github.com/abidlabs) in [PR 1102](https://github.com/gradio-app/gradio/pull/1102)\n- Redos #1106 by [@abidlabs](https://github.com/abidlabs) in [PR 1112](https://github.com/gradio-app/gradio/pull/1112)\n- Interface types: handle input-only, output-only, and unified interfaces by [@abidlabs](https://github.com/abidlabs) in [PR 1108](https://github.com/gradio-app/gradio/pull/1108)\n- Hotfix + New pypi release 2.9b11 by [@abidlabs](https://github.com/abidlabs) in [PR 1118](https://github.com/gradio-app/gradio/pull/1118)\n- issue-checkbox by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1122](https://github.com/gradio-app/gradio/pull/1122)\n- issue-checkbox-hotfix by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1127](https://github.com/gradio-app/gradio/pull/1127)\n- Fix demos in website by [@aliabd](https://github.com/aliabd) in [PR 1130](https://github.com/gradio-app/gradio/pull/1130)\n- Guide for Gradio ONNX model zoo on Huggingface by [@AK391](https://github.com/AK391) in [PR 1073](https://github.com/gradio-app/gradio/pull/1073)\n- ONNX guide fixes by [@aliabd](https://github.com/aliabd) in [PR 1131](https://github.com/gradio-app/gradio/pull/1131)\n- Stacked form inputs css by [@gary149](https://github.com/gary149) in [PR 1134](https://github.com/gradio-app/gradio/pull/1134)\n- made default value in textbox empty string by [@abidlabs](https://github.com/abidlabs) in [PR 1135](https://github.com/gradio-app/gradio/pull/1135)\n- Examples UI by [@gary149](https://github.com/gary149) in [PR 1121](https://github.com/gradio-app/gradio/pull/1121)\n- Chatbot custom color support by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 1092](https://github.com/gradio-app/gradio/pull/1092)\n- highlighted text colors by [@pngwn](https://github.com/pngwn) in [PR 1119](https://github.com/gradio-app/gradio/pull/1119)\n- pin to pnpm 6 for now by [@pngwn](https://github.com/pngwn) in [PR 1147](https://github.com/gradio-app/gradio/pull/1147)\n- Restore queue in Blocks by [@aliabid94](https://github.com/aliabid94) in [PR 1137](https://github.com/gradio-app/gradio/pull/1137)\n- add select event for tabitems by [@pngwn](https://github.com/pngwn) in [PR 1154](https://github.com/gradio-app/gradio/pull/1154)\n- max_lines + autoheight for textbox by [@pngwn](https://github.com/pngwn) in [PR 1153](https://github.com/gradio-app/gradio/pull/1153)\n- use color palette for chatbot by [@pngwn](https://github.com/pngwn) in [PR 
1152](https://github.com/gradio-app/gradio/pull/1152)\n- Timeseries improvements by [@pngwn](https://github.com/pngwn) in [PR 1149](https://github.com/gradio-app/gradio/pull/1149)\n- move styling for interface panels to frontend by [@pngwn](https://github.com/pngwn) in [PR 1146](https://github.com/gradio-app/gradio/pull/1146)\n- html tweaks by [@pngwn](https://github.com/pngwn) in [PR 1145](https://github.com/gradio-app/gradio/pull/1145)\n- Issue #768: Support passing none to resize and crop image by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 1144](https://github.com/gradio-app/gradio/pull/1144)\n- image gallery component + img css by [@aliabid94](https://github.com/aliabid94) in [PR 1140](https://github.com/gradio-app/gradio/pull/1140)\n- networking tweak by [@abidlabs](https://github.com/abidlabs) in [PR 1143](https://github.com/gradio-app/gradio/pull/1143)\n- Allow enabling queue per event listener by [@aliabid94](https://github.com/aliabid94) in [PR 1155](https://github.com/gradio-app/gradio/pull/1155)\n- config hotfix and v. 2.9b23 by [@abidlabs](https://github.com/abidlabs) in [PR 1158](https://github.com/gradio-app/gradio/pull/1158)\n- Custom JS calls by [@aliabid94](https://github.com/aliabid94) in [PR 1082](https://github.com/gradio-app/gradio/pull/1082)\n- Small fixes: queue default fix, ffmpeg installation message by [@abidlabs](https://github.com/abidlabs) in [PR 1159](https://github.com/gradio-app/gradio/pull/1159)\n- formatting by [@abidlabs](https://github.com/abidlabs) in [PR 1161](https://github.com/gradio-app/gradio/pull/1161)\n- enable flex grow for gr-box by [@radames](https://github.com/radames) in [PR 1165](https://github.com/gradio-app/gradio/pull/1165)\n- 1148 loading by [@pngwn](https://github.com/pngwn) in [PR 1164](https://github.com/gradio-app/gradio/pull/1164)\n- Put enable_queue kwarg back in launch() by [@aliabid94](https://github.com/aliabid94) in [PR 1167](https://github.com/gradio-app/gradio/pull/1167)\n- A few small fixes by [@abidlabs](https://github.com/abidlabs) in [PR 1171](https://github.com/gradio-app/gradio/pull/1171)\n- Hotfix for dropdown component by [@abidlabs](https://github.com/abidlabs) in [PR 1172](https://github.com/gradio-app/gradio/pull/1172)\n- use secondary buttons in interface by [@pngwn](https://github.com/pngwn) in [PR 1173](https://github.com/gradio-app/gradio/pull/1173)\n- 1183 component height by [@pngwn](https://github.com/pngwn) in [PR 1185](https://github.com/gradio-app/gradio/pull/1185)\n- 962 dataframe by [@pngwn](https://github.com/pngwn) in [PR 1186](https://github.com/gradio-app/gradio/pull/1186)\n- update-contributing by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1188](https://github.com/gradio-app/gradio/pull/1188)\n- Table tweaks by [@pngwn](https://github.com/pngwn) in [PR 1195](https://github.com/gradio-app/gradio/pull/1195)\n- wrap tab content in column by [@pngwn](https://github.com/pngwn) in [PR 1200](https://github.com/gradio-app/gradio/pull/1200)\n- WIP: Add dark mode support by [@gary149](https://github.com/gary149) in [PR 1187](https://github.com/gradio-app/gradio/pull/1187)\n- Restored /api/predict/ endpoint for Interfaces by [@abidlabs](https://github.com/abidlabs) in [PR 1199](https://github.com/gradio-app/gradio/pull/1199)\n- hltext-label by [@pngwn](https://github.com/pngwn) in [PR 1204](https://github.com/gradio-app/gradio/pull/1204)\n- add copy functionality to json by [@pngwn](https://github.com/pngwn) in [PR 1205](https://github.com/gradio-app/gradio/pull/1205)\n- Update 
component config by [@aliabid94](https://github.com/aliabid94) in [PR 1089](https://github.com/gradio-app/gradio/pull/1089)\n- fix placeholder prompt by [@pngwn](https://github.com/pngwn) in [PR 1215](https://github.com/gradio-app/gradio/pull/1215)\n- ensure webcam video value is propagated correctly by [@pngwn](https://github.com/pngwn) in [PR 1218](https://github.com/gradio-app/gradio/pull/1218)\n- Automatic word-break in highlighted text, combine_adjacent support by [@aliabid94](https://github.com/aliabid94) in [PR 1209](https://github.com/gradio-app/gradio/pull/1209)\n- async-function-support by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1190](https://github.com/gradio-app/gradio/pull/1190)\n- Sharing fix for assets by [@aliabid94](https://github.com/aliabid94) in [PR 1208](https://github.com/gradio-app/gradio/pull/1208)\n- Hotfixes for course demos by [@abidlabs](https://github.com/abidlabs) in [PR 1222](https://github.com/gradio-app/gradio/pull/1222)\n- Allow Custom CSS by [@aliabid94](https://github.com/aliabid94) in [PR 1170](https://github.com/gradio-app/gradio/pull/1170)\n- share-hotfix by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1226](https://github.com/gradio-app/gradio/pull/1226)\n- tweaks by [@pngwn](https://github.com/pngwn) in [PR 1229](https://github.com/gradio-app/gradio/pull/1229)\n- white space for class concatenation by [@radames](https://github.com/radames) in [PR 1228](https://github.com/gradio-app/gradio/pull/1228)\n- Tweaks by [@pngwn](https://github.com/pngwn) in [PR 1230](https://github.com/gradio-app/gradio/pull/1230)\n- css tweaks by [@pngwn](https://github.com/pngwn) in [PR 1235](https://github.com/gradio-app/gradio/pull/1235)\n- ensure defaults height match for media inputs by [@pngwn](https://github.com/pngwn) in [PR 1236](https://github.com/gradio-app/gradio/pull/1236)\n- Default Label label value by [@radames](https://github.com/radames) in [PR 1239](https://github.com/gradio-app/gradio/pull/1239)\n- update-shortcut-syntax by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1234](https://github.com/gradio-app/gradio/pull/1234)\n- Update version.txt by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1244](https://github.com/gradio-app/gradio/pull/1244)\n- Layout bugs by [@pngwn](https://github.com/pngwn) in [PR 1246](https://github.com/gradio-app/gradio/pull/1246)\n- Update demo by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1253](https://github.com/gradio-app/gradio/pull/1253)\n- Button default name by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1243](https://github.com/gradio-app/gradio/pull/1243)\n- Labels spacing by [@gary149](https://github.com/gary149) in [PR 1254](https://github.com/gradio-app/gradio/pull/1254)\n- add global loader for gradio app by [@pngwn](https://github.com/pngwn) in [PR 1251](https://github.com/gradio-app/gradio/pull/1251)\n- ui apis for dalle-mini by [@pngwn](https://github.com/pngwn) in [PR 1258](https://github.com/gradio-app/gradio/pull/1258)\n- Add precision to Number, backend only by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 1125](https://github.com/gradio-app/gradio/pull/1125)\n- Website Design Changes by [@abidlabs](https://github.com/abidlabs) in [PR 1015](https://github.com/gradio-app/gradio/pull/1015)\n- Small fixes for multiple demos compatible with 3.0 by [@radames](https://github.com/radames) in [PR 1257](https://github.com/gradio-app/gradio/pull/1257)\n- Issue #1160: Model 3D component not destroyed correctly by 
[@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 1219](https://github.com/gradio-app/gradio/pull/1219)\n- Fixes to components by [@abidlabs](https://github.com/abidlabs) in [PR 1260](https://github.com/gradio-app/gradio/pull/1260)\n- layout docs by [@abidlabs](https://github.com/abidlabs) in [PR 1263](https://github.com/gradio-app/gradio/pull/1263)\n- Static forms by [@pngwn](https://github.com/pngwn) in [PR 1264](https://github.com/gradio-app/gradio/pull/1264)\n- Cdn assets by [@pngwn](https://github.com/pngwn) in [PR 1265](https://github.com/gradio-app/gradio/pull/1265)\n- update logo by [@gary149](https://github.com/gary149) in [PR 1266](https://github.com/gradio-app/gradio/pull/1266)\n- fix slider by [@aliabid94](https://github.com/aliabid94) in [PR 1268](https://github.com/gradio-app/gradio/pull/1268)\n- maybe fix auth in iframes by [@pngwn](https://github.com/pngwn) in [PR 1261](https://github.com/gradio-app/gradio/pull/1261)\n- Improves \"Getting Started\" guide by [@abidlabs](https://github.com/abidlabs) in [PR 1269](https://github.com/gradio-app/gradio/pull/1269)\n- Add embedded demos to website by [@aliabid94](https://github.com/aliabid94) in [PR 1270](https://github.com/gradio-app/gradio/pull/1270)\n- Label hotfixes by [@abidlabs](https://github.com/abidlabs) in [PR 1281](https://github.com/gradio-app/gradio/pull/1281)\n- General tweaks by [@pngwn](https://github.com/pngwn) in [PR 1276](https://github.com/gradio-app/gradio/pull/1276)\n- only affect links within the document by [@pngwn](https://github.com/pngwn) in [PR 1282](https://github.com/gradio-app/gradio/pull/1282)\n- release 3.0b9 by [@abidlabs](https://github.com/abidlabs) in [PR 1283](https://github.com/gradio-app/gradio/pull/1283)\n- Dm by [@pngwn](https://github.com/pngwn) in [PR 1284](https://github.com/gradio-app/gradio/pull/1284)\n- Website fixes by [@aliabd](https://github.com/aliabd) in [PR 1286](https://github.com/gradio-app/gradio/pull/1286)\n- Create Streamables by [@aliabid94](https://github.com/aliabid94) in [PR 1279](https://github.com/gradio-app/gradio/pull/1279)\n- ensure table works on mobile by [@pngwn](https://github.com/pngwn) in [PR 1277](https://github.com/gradio-app/gradio/pull/1277)\n- changes by [@aliabid94](https://github.com/aliabid94) in [PR 1287](https://github.com/gradio-app/gradio/pull/1287)\n- demo alignment on landing page by [@aliabd](https://github.com/aliabd) in [PR 1288](https://github.com/gradio-app/gradio/pull/1288)\n- New meta img by [@aliabd](https://github.com/aliabd) in [PR 1289](https://github.com/gradio-app/gradio/pull/1289)\n- updated PyPi version to 3.0 by [@abidlabs](https://github.com/abidlabs) in [PR 1290](https://github.com/gradio-app/gradio/pull/1290)\n- Fix site by [@aliabid94](https://github.com/aliabid94) in [PR 1291](https://github.com/gradio-app/gradio/pull/1291)\n- Mobile responsive guides by [@aliabd](https://github.com/aliabd) in [PR 1293](https://github.com/gradio-app/gradio/pull/1293)\n- Update readme by [@abidlabs](https://github.com/abidlabs) in [PR 1292](https://github.com/gradio-app/gradio/pull/1292)\n- gif by [@abidlabs](https://github.com/abidlabs) in [PR 1296](https://github.com/gradio-app/gradio/pull/1296)\n- Allow decoding headerless b64 string [@1lint](https://github.com/1lint) in [PR 4031](https://github.com/gradio-app/gradio/pull/4031)\n\n### Contributors Shoutout:\n\n- [@JefferyChiang](https://github.com/JefferyChiang) made their first contribution in [PR 1004](https://github.com/gradio-app/gradio/pull/1004)\n- 
[@NimaBoscarino](https://github.com/NimaBoscarino) made their first contribution in [PR 1000](https://github.com/gradio-app/gradio/pull/1000)\n- [@ronvoluted](https://github.com/ronvoluted) made their first contribution in [PR 1050](https://github.com/gradio-app/gradio/pull/1050)\n- [@radames](https://github.com/radames) made their first contribution in [PR 1074](https://github.com/gradio-app/gradio/pull/1074)\n- [@freddyaboulton](https://github.com/freddyaboulton) made their first contribution in [PR 1085](https://github.com/gradio-app/gradio/pull/1085)\n- [@liteli1987gmail](https://github.com/liteli1987gmail) & [@chenglu](https://github.com/chenglu) made their first contribution in [PR 4767](https://github.com/gradio-app/gradio/pull/4767)" -} +{"content": "# Changelog\n\n## 3.39.0\n\n### Highlights\n\n#### Create Discord Bots from Gradio Apps \ud83e\udd16 ([#4960](https://github.com/gradio-app/gradio/pull/4960) [`46e4ef67`](https://github.com/gradio-app/gradio/commit/46e4ef67d287dd68a91473b73172b29cbad064bc))\n\nWe're excited to announce that Gradio can now automatically create a discord bot from any `gr.ChatInterface` app.\n\nIt's as easy as importing `gradio_client`, connecting to the app, and calling `deploy_discord`!\n\n_\ud83e\udd99 Turning Llama 2 70b into a discord bot \ud83e\udd99_\n\n```python\nimport gradio_client as grc\ngrc.Client(\"ysharma/Explore_llamav2_with_TGI\").deploy_discord(to_id=\"llama2-70b-discord-bot\")\n```\n\n\n\n#### Getting started with template spaces\n\nTo help get you started, we have created an organization on Hugging Face called [gradio-discord-bots](https://huggingface.co/gradio-discord-bots) with template spaces you can use to turn state of the art LLMs powered by Gradio to discord bots.\n\nCurrently we have template spaces for:\n\n- [Llama-2-70b-chat-hf](https://huggingface.co/spaces/gradio-discord-bots/Llama-2-70b-chat-hf) powered by a FREE Hugging Face Inference Endpoint!\n- [Llama-2-13b-chat-hf](https://huggingface.co/spaces/gradio-discord-bots/Llama-2-13b-chat-hf) powered by Hugging Face Inference Endpoints.\n- [Llama-2-13b-chat-hf](https://huggingface.co/spaces/gradio-discord-bots/llama-2-13b-chat-transformers) powered by Hugging Face transformers.\n- [falcon-7b-instruct](https://huggingface.co/spaces/gradio-discord-bots/falcon-7b-instruct) powered by Hugging Face Inference Endpoints.\n- [gpt-3.5-turbo](https://huggingface.co/spaces/gradio-discord-bots/gpt-35-turbo), powered by openai. Requires an OpenAI key.\n\nBut once again, you can deploy ANY `gr.ChatInterface` app exposed on the internet! So don't hesitate to try it on your own Chatbots.\n\n\u2757\ufe0f Additional Note \u2757\ufe0f: Technically, any gradio app that exposes an api route that takes in a single string and outputs a single string can be deployed to discord. But `gr.ChatInterface` apps naturally lend themselves to discord's chat functionality so we suggest you start with those.\n\nThanks [@freddyaboulton](https://github.com/freddyaboulton)!\n\n### Features\n\n- [#4995](https://github.com/gradio-app/gradio/pull/4995) [`3f8c210b`](https://github.com/gradio-app/gradio/commit/3f8c210b01ef1ceaaf8ee73be4bf246b5b745bbf) - Implement left and right click in `Gallery` component and show implicit images in `Gallery` grid. 
Thanks [@hannahblair](https://github.com/hannahblair)!\n- [#4993](https://github.com/gradio-app/gradio/pull/4993) [`dc07a9f9`](https://github.com/gradio-app/gradio/commit/dc07a9f947de44b419d8384987a02dcf94977851) - Bringing back the \"Add download button for audio\" PR by [@leuryr](https://github.com/leuryr). Thanks [@abidlabs](https://github.com/abidlabs)!\n- [#4979](https://github.com/gradio-app/gradio/pull/4979) [`44ac8ad0`](https://github.com/gradio-app/gradio/commit/44ac8ad08d82ea12c503dde5c78f999eb0452de2) - Allow setting sketch color default. Thanks [@aliabid94](https://github.com/aliabid94)!\n- [#4985](https://github.com/gradio-app/gradio/pull/4985) [`b74f8453`](https://github.com/gradio-app/gradio/commit/b74f8453034328f0e42da8e41785f5eb039b45d7) - Adds `additional_inputs` to `gr.ChatInterface`. Thanks [@abidlabs](https://github.com/abidlabs)!\n\n### Fixes\n\n- [#4997](https://github.com/gradio-app/gradio/pull/4997) [`41c83070`](https://github.com/gradio-app/gradio/commit/41c83070b01632084e7d29123048a96c1e261407) - Add CSS resets and specifiers to play nice with HF blog. Thanks [@aliabid94](https://github.com/aliabid94)!\n\n## 3.38\n\n### New Features:\n\n- Provide a parameter `animate` (`False` by default) in `gr.make_waveform()` which animates the overlayed waveform by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4918](https://github.com/gradio-app/gradio/pull/4918)\n- Add `show_download_button` param to allow the download button in static Image components to be hidden by [@hannahblair](https://github.com/hannahblair) in [PR 4959](https://github.com/gradio-app/gradio/pull/4959)\n- Added autofocus argument to Textbox by [@aliabid94](https://github.com/aliabid94) in [PR 4978](https://github.com/gradio-app/gradio/pull/4978)\n- The `gr.ChatInterface` UI now converts the \"Submit\" button to a \"Stop\" button in ChatInterface while streaming, which can be used to pause generation. By [@abidlabs](https://github.com/abidlabs) in [PR 4971](https://github.com/gradio-app/gradio/pull/4971).\n- Add a `border_color_accent_subdued` theme variable to add a subdued border color to accented items. This is used by chatbot user messages. Set the value of this variable in `Default` theme to `*primary_200`. By [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4989](https://github.com/gradio-app/gradio/pull/4989)\n- Add default sketch color argument `brush_color`. Also, masks drawn on images are now slightly translucent (and mask color can also be set via brush_color). By [@aliabid94](https://github.com/aliabid94) in [PR 4979](https://github.com/gradio-app/gradio/pull/4979)\n\n### Bug Fixes:\n\n- Fixes `cancels` for generators so that if a generator is canceled before it is complete, subsequent runs of the event do not continue from the previous iteration, but rather start from the beginning. By [@abidlabs](https://github.com/abidlabs) in [PR 4969](https://github.com/gradio-app/gradio/pull/4969).\n- Use `gr.State` in `gr.ChatInterface` to reduce latency by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4976](https://github.com/gradio-app/gradio/pull/4976)\n- Fix bug with `gr.Interface` where component labels inferred from handler parameters were including special args like `gr.Request` or `gr.EventData`. 
By [@cbensimon](https://github.com/cbensimon) in [PR 4956](https://github.com/gradio-app/gradio/pull/4956)\n\n#\n\n### Other Changes:\n\n- Apply pyright to the `components` directory by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4948](https://github.com/gradio-app/gradio/pull/4948)\n- Improved look of ChatInterface by [@aliabid94](https://github.com/aliabid94) in [PR 4978](https://github.com/gradio-app/gradio/pull/4978)\n\n## 3.37\n\n### New Features:\n\nIntroducing a new `gr.ChatInterface` abstraction, which allows Gradio users to build fully functioning Chat interfaces very easily. The only required parameter is a chat function `fn`, which accepts a (string) user input `message` and a (list of lists) chat `history` and returns a (string) response. Here's a toy example:\n\n```py\nimport gradio as gr\n\ndef echo(message, history):\n return message\n\ndemo = gr.ChatInterface(fn=echo, examples=[\"hello\", \"hola\", \"merhaba\"], title=\"Echo Bot\")\ndemo.launch()\n```\n\nWhich produces:\n\n\"image\"\n\nAnd a corresponding easy-to-use API at `/chat`:\n\n\"image\"\n\nThe `gr.ChatInterface` abstraction works nicely with various LLM libraries, such as `langchain`. See the [dedicated guide](https://gradio.app/guides/creating-a-chatbot-fast) for more examples using `gr.ChatInterface`. Collective team effort in [PR 4869](https://github.com/gradio-app/gradio/pull/4869)\n\n- Chatbot messages now show hyperlinks to download files uploaded to `gr.Chatbot()` by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4848](https://github.com/gradio-app/gradio/pull/4848)\n- Cached examples now work with generators and async generators by [@abidlabs](https://github.com/abidlabs) in [PR 4927](https://github.com/gradio-app/gradio/pull/4927)\n- Add RTL support to `gr.Markdown`, `gr.Chatbot`, `gr.Textbox` (via the `rtl` boolean parameter) and text-alignment to `gr.Textbox`(via the string `text_align` parameter) by [@abidlabs](https://github.com/abidlabs) in [PR 4933](https://github.com/gradio-app/gradio/pull/4933)\n\nExamples of usage:\n\n```py\nwith gr.Blocks() as demo:\n gr.Textbox(interactive=True, text_align=\"right\")\ndemo.launch()\n```\n\n```py\nwith gr.Blocks() as demo:\n gr.Markdown(\"\u0633\u0644\u0627\u0645\", rtl=True)\ndemo.launch()\n```\n\n- The `get_api_info` method of `Blocks` now supports layout output components [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4871](https://github.com/gradio-app/gradio/pull/4871)\n\n- Added the support for the new command `gradio environment`to make it easier for people to file bug reports if we shipped an easy command to list the OS, gradio version, and versions of gradio/gradio-client dependencies. 
by [@varshneydevansh](https://github.com/varshneydevansh) in [PR 4915](https://github.com/gradio-app/gradio/pull/4915).\n\n### Bug Fixes:\n\n- The `.change()` event is fixed in `Video` and `Image` so that it only fires once by [@abidlabs](https://github.com/abidlabs) in [PR 4793](https://github.com/gradio-app/gradio/pull/4793)\n- The `.change()` event is fixed in `Audio` so that it fires when the component value is programmatically updated by [@abidlabs](https://github.com/abidlabs) in [PR 4793](https://github.com/gradio-app/gradio/pull/4793)\n\n* Add missing `display: flex` property to `Row` so that flex styling is applied to children by [@hannahblair](https://github.com/hannahblair) in [PR 4896](https://github.com/gradio-app/gradio/pull/4896)\n* Fixed bug where `gr.Video` could not preprocess urls by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4904](https://github.com/gradio-app/gradio/pull/4904)\n* Fixed copy button rendering in API page on Safari by [@aliabid94](https://github.com/aliabid94) in [PR 4924](https://github.com/gradio-app/gradio/pull/4924)\n* Fixed `gr.Group` and `container=False`. The `container` parameter is only available for `Textbox`, `Number`, and `Dropdown`, the only elements where it makes sense. By [@aliabid94](https://github.com/aliabid94) in [PR 4916](https://github.com/gradio-app/gradio/pull/4916)\n* Fixed broken image link in auto-generated `app.py` from `ThemeClass.push_to_hub` by [@deepkyu](https://github.com/deepkyu) in [PR 4944](https://github.com/gradio-app/gradio/pull/4944)\n\n### Other Changes:\n\n- Warning on mobile that if a user leaves the tab, websocket connection may break. On broken connection, tries to rejoin queue and displays error conveying connection broke. By [@aliabid94](https://github.com/aliabid94) in [PR 4742](https://github.com/gradio-app/gradio/pull/4742)\n- Remove blocking network calls made before the local URL gets printed - these slow down the display of the local URL, especially when no internet is available. [@aliabid94](https://github.com/aliabid94) in [PR 4905](https://github.com/gradio-app/gradio/pull/4905).\n- Pinned dependencies to major versions to reduce the likelihood of a broken `gradio` due to changes in downstream dependencies by [@abidlabs](https://github.com/abidlabs) in [PR 4885](https://github.com/gradio-app/gradio/pull/4885)\n- Queue `max_size` defaults to parent Blocks `max_thread` when running on Spaces with ZeroGPU hardware. By [@cbensimon](https://github.com/cbensimon) in [PR 4937](https://github.com/gradio-app/gradio/pull/4937)\n\n### Breaking Changes:\n\nMotivated by the release of `pydantic==2.0`, which included breaking changes that broke a large number of Gradio apps, we've pinned many gradio dependencies. Note that pinned dependencies can cause downstream conflicts, so this may be a breaking change. 
That being said, we've kept the pins pretty loose, and we're expecting change to be better for the long-term stability of Gradio apps.\n\n## 3.36.1\n\n### New Features:\n\n- Hotfix to support pydantic v1 and v2 by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4835](https://github.com/gradio-app/gradio/pull/4835)\n\n### Bug Fixes:\n\n- Fix bug where `gr.File` change event was not triggered when the value was changed by another event by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4811](https://github.com/gradio-app/gradio/pull/4811)\n\n#\n\n#\n\n## 3.36.0\n\n### New Features:\n\n- The `gr.Video`, `gr.Audio`, `gr.Image`, `gr.Chatbot`, and `gr.Gallery` components now include a share icon when deployed on Spaces. This behavior can be modified by setting the `show_share_button` parameter in the component classes. by [@aliabid94](https://github.com/aliabid94) in [PR 4651](https://github.com/gradio-app/gradio/pull/4651)\n- Allow the web component `space`, `src`, and `host` attributes to be updated dynamically by [@pngwn](https://github.com/pngwn) in [PR 4461](https://github.com/gradio-app/gradio/pull/4461)\n- Suggestion for Spaces Duplication built into Gradio, by [@aliabid94](https://github.com/aliabid94) in [PR 4458](https://github.com/gradio-app/gradio/pull/4458)\n- The `api_name` parameter now accepts `False` as a value, which means it does not show up in named or unnamed endpoints. By [@abidlabs](https://github.com/aliabid94) in [PR 4683](https://github.com/gradio-app/gradio/pull/4683)\n- Added support for `pathlib.Path` in `gr.Video`, `gr.Gallery`, and `gr.Chatbot` by [sunilkumardash9](https://github.com/sunilkumardash9) in [PR 4581](https://github.com/gradio-app/gradio/pull/4581).\n\n### Bug Fixes:\n\n- Updated components with `info` attribute to update when `update()` is called on them. 
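\n\nFor instance, a minimal sketch of refreshing a component's helper text through `update()` (the component and wording below are illustrative, not taken from the PR):\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n    amount = gr.Number(label=\"Amount\", info=\"Enter a value in USD\")\n    switch = gr.Button(\"Switch to EUR\")\n    # update() can now change the info text alongside other properties\n    switch.click(lambda: gr.Number.update(info=\"Enter a value in EUR\"), None, amount)\n\ndemo.launch()\n```\n\n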
By [@jebarpg](https://github.com/jebarpg) in [PR 4715](https://github.com/gradio-app/gradio/pull/4715).\n- Ensure the `Image` component's undo button works when mode is `mask` or `color-sketch` by [@amyorz](https://github.com/AmyOrz) in [PR 4692](https://github.com/gradio-app/gradio/pull/4692)\n- Load the iframe resizer external asset asynchronously, by [@akx](https://github.com/akx) in [PR 4336](https://github.com/gradio-app/gradio/pull/4336)\n- Restored missing imports in `gr.components` by [@abidlabs](https://github.com/abidlabs) in [PR 4566](https://github.com/gradio-app/gradio/pull/4566)\n- Fix bug where `select` event was not triggered in `gr.Gallery` if `height` was set to be large with `allow_preview=False` by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4551](https://github.com/gradio-app/gradio/pull/4551)\n- Fix bug where setting `visible=False` in `gr.Group` event did not work by [@abidlabs](https://github.com/abidlabs) in [PR 4567](https://github.com/gradio-app/gradio/pull/4567)\n- Fix `make_waveform` to work with paths that contain spaces by [@akx](https://github.com/akx) in [PR 4570](https://github.com/gradio-app/gradio/pull/4570) & [PR 4578](https://github.com/gradio-app/gradio/pull/4578)\n- Send captured data in `stop_recording` event for `gr.Audio` and `gr.Video` components by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4554](https://github.com/gradio-app/gradio/pull/4554)\n- Fix bug in `gr.Gallery` where `height` and `object_fit` parameters were being ignored by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4576](https://github.com/gradio-app/gradio/pull/4576)\n- Fixes an HTML sanitization issue in DOMPurify where links in markdown were not opening in a new window by [@hannahblair] in [PR 4577](https://github.com/gradio-app/gradio/pull/4577)\n- Fixed Dropdown height rendering in Columns by [@aliabid94](https://github.com/aliabid94) in [PR 4584](https://github.com/gradio-app/gradio/pull/4584)\n- Fixed bug where `AnnotatedImage` css styling was causing the annotation masks to not be displayed correctly by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4628](https://github.com/gradio-app/gradio/pull/4628)\n- Ensure that Gradio does not silently fail when running on a port that is occupied by [@abidlabs](https://github.com/abidlabs) in [PR 4624](https://github.com/gradio-app/gradio/pull/4624).\n- Fix double upload bug that caused lag in file uploads by [@aliabid94](https://github.com/aliabid94) in [PR 4661](https://github.com/gradio-app/gradio/pull/4661)\n- `Progress` component now appears even when no `iterable` is specified in `tqdm` constructor by [@itrushkin](https://github.com/itrushkin) in [PR 4475](https://github.com/gradio-app/gradio/pull/4475)\n- Deprecation warnings now point at the user code using those deprecated features, instead of Gradio internals, by [@akx](https://github.com/akx) in [PR 4694](https://github.com/gradio-app/gradio/pull/4694)\n- Adapt column widths in gr.Examples based on content by [@pngwn](https://github.com/pngwn) & [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4700](https://github.com/gradio-app/gradio/pull/4700)\n- The `plot` parameter deprecation warnings should now only be emitted for `Image` components by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4709](https://github.com/gradio-app/gradio/pull/4709)\n- Removed unnecessary `type` deprecation warning by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4709](https://github.com/gradio-app/gradio/pull/4709)\n- 
Ensure Audio autoplay works when `autoplay=True` and the audio source is dynamically updated by [@pngwn](https://github.com/pngwn) in [PR 4705](https://github.com/gradio-app/gradio/pull/4705)\n- When an error modal is shown in spaces, ensure we scroll to the top so it can be seen by [@pngwn](https://github.com/pngwn) in [PR 4712](https://github.com/gradio-app/gradio/pull/4712)\n- Update dependencies by [@pngwn](https://github.com/pngwn) in [PR 4675](https://github.com/gradio-app/gradio/pull/4675)\n- Fixes `gr.Dropdown` being cut off at the bottom by [@abidlabs](https://github.com/abidlabs) in [PR 4691](https://github.com/gradio-app/gradio/pull/4691).\n- Scroll to the top when clicking \"View API\" in spaces by [@pngwn](https://github.com/pngwn) in [PR 4714](https://github.com/gradio-app/gradio/pull/4714)\n- Fix bug where `show_label` was hiding the entire component for `gr.Label` by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4713](https://github.com/gradio-app/gradio/pull/4713)\n- Don't crash when uploaded image has broken EXIF data, by [@akx](https://github.com/akx) in [PR 4764](https://github.com/gradio-app/gradio/pull/4764)\n- Place toast messages at the top of the screen by [@pngwn](https://github.com/pngwn) in [PR 4796](https://github.com/gradio-app/gradio/pull/4796)\n- Fix regressed styling of Login page when auth is enabled by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4797](https://github.com/gradio-app/gradio/pull/4797)\n- Prevent broken scrolling to output on Spaces by [@aliabid94](https://github.com/aliabid94) in [PR 4822](https://github.com/gradio-app/gradio/pull/4822)\n\n### Other Changes:\n\n- Add `.git-blame-ignore-revs` by [@akx](https://github.com/akx) in [PR 4586](https://github.com/gradio-app/gradio/pull/4586)\n- Update frontend dependencies in [PR 4601](https://github.com/gradio-app/gradio/pull/4601)\n- Use `typing.Literal` where possible in gradio library and client by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4608](https://github.com/gradio-app/gradio/pull/4608)\n- Remove unnecessary mock json files for frontend E2E tests by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4625](https://github.com/gradio-app/gradio/pull/4625)\n- Update dependencies by [@pngwn](https://github.com/pngwn) in [PR 4643](https://github.com/gradio-app/gradio/pull/4643)\n- The theme builder now launches successfully, and the API docs are cleaned up. 
By [@abidlabs](https://github.com/aliabid94) in [PR 4683](https://github.com/gradio-app/gradio/pull/4683)\n- Remove `cleared_value` from some components as its no longer used internally by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4685](https://github.com/gradio-app/gradio/pull/4685)\n- Better errors when you define two Blocks and reference components in one Blocks from the events in the other Blocks [@abidlabs](https://github.com/abidlabs) in [PR 4738](https://github.com/gradio-app/gradio/pull/4738).\n- Better message when share link is not created by [@abidlabs](https://github.com/abidlabs) in [PR 4773](https://github.com/gradio-app/gradio/pull/4773).\n- Improve accessibility around selected images in gr.Gallery component by [@hannahblair](https://github.com/hannahblair) in [PR 4790](https://github.com/gradio-app/gradio/pull/4790)\n\n### Breaking Changes:\n\n[PR 4683](https://github.com/gradio-app/gradio/pull/4683) removes the explict named endpoint \"load_examples\" from gr.Interface that was introduced in [PR 4456](https://github.com/gradio-app/gradio/pull/4456).\n\n## 3.35.2\n\n#\n\n### Bug Fixes:\n\n- Fix chatbot streaming by [@aliabid94](https://github.com/aliabid94) in [PR 4537](https://github.com/gradio-app/gradio/pull/4537)\n- Fix chatbot height and scrolling by [@aliabid94](https://github.com/aliabid94) in [PR 4540](https://github.com/gradio-app/gradio/pull/4540)\n\n#\n\n#\n\n## 3.35.1\n\n#\n\n### Bug Fixes:\n\n- Fix chatbot streaming by [@aliabid94](https://github.com/aliabid94) in [PR 4537](https://github.com/gradio-app/gradio/pull/4537)\n- Fix error modal position and text size by [@pngwn](https://github.com/pngwn) in [PR 4538](https://github.com/gradio-app/gradio/pull/4538).\n\n#\n\n#\n\n## 3.35.0\n\n### New Features:\n\n- A `gr.ClearButton` which allows users to easily clear the values of components by [@abidlabs](https://github.com/abidlabs) in [PR 4456](https://github.com/gradio-app/gradio/pull/4456)\n\nExample usage:\n\n```py\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n chatbot = gr.Chatbot([(\"Hello\", \"How are you?\")])\n with gr.Row():\n textbox = gr.Textbox(scale=3, interactive=True)\n gr.ClearButton([textbox, chatbot], scale=1)\n\ndemo.launch()\n```\n\n- Min and max value for gr.Number by [@artegoser](https://github.com/artegoser) and [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3991](https://github.com/gradio-app/gradio/pull/3991)\n- Add `start_recording` and `stop_recording` events to `Video` and `Audio` components by [@pngwn](https://github.com/pngwn) in [PR 4422](https://github.com/gradio-app/gradio/pull/4422)\n- Allow any function to generate an error message and allow multiple messages to appear at a time. 
Other error modal improvements such as auto dismiss after a time limit and a new layout on mobile [@pngwn](https://github.com/pngwn) in [PR 4459](https://github.com/gradio-app/gradio/pull/4459).\n- Add `autoplay` kwarg to `Video` and `Audio` components by [@pngwn](https://github.com/pngwn) in [PR 4453](https://github.com/gradio-app/gradio/pull/4453)\n- Add `allow_preview` parameter to `Gallery` to control whether a detailed preview is displayed on click by\n [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4470](https://github.com/gradio-app/gradio/pull/4470)\n- Add `latex_delimiters` parameter to `Chatbot` to control the delimiters used for LaTeX and to disable LaTeX in the `Chatbot` by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4516](https://github.com/gradio-app/gradio/pull/4516)\n- Can now issue `gr.Warning` and `gr.Info` modals. Simply put the code `gr.Warning(\"Your warning message\")` or `gr.Info(\"Your info message\")` as a standalone line in your function. By [@aliabid94](https://github.com/aliabid94) in [PR 4518](https://github.com/gradio-app/gradio/pull/4518).\n\nExample:\n\n```python\ndef start_process(name):\n gr.Info(\"Starting process\")\n if name is None:\n gr.Warning(\"Name is empty\")\n ...\n if success == False:\n raise gr.Error(\"Process failed\")\n```\n\n### Bug Fixes:\n\n- Add support for PAUSED state in the JS client by [@abidlabs](https://github.com/abidlabs) in [PR 4438](https://github.com/gradio-app/gradio/pull/4438)\n- Ensure Tabs only occupy the space required by [@pngwn](https://github.com/pngwn) in [PR 4419](https://github.com/gradio-app/gradio/pull/4419)\n- Ensure components have the correct empty sizes to prevent empty containers from collapsing by [@pngwn](https://github.com/pngwn) in [PR 4447](https://github.com/gradio-app/gradio/pull/4447).\n- Frontend code no longer crashes when there is a relative URL in an `
` element, by [@akx](https://github.com/akx) in [PR 4449](https://github.com/gradio-app/gradio/pull/4449).\n- Fix bug where setting `format='mp4'` on a video component would cause the function to error out if the uploaded video was not playable by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4467](https://github.com/gradio-app/gradio/pull/4467)\n- Fix `_js` parameter to work even without backend function, by [@aliabid94](https://github.com/aliabid94) in [PR 4486](https://github.com/gradio-app/gradio/pull/4486).\n- Fix new line issue with `gr.Chatbot()` by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4491](https://github.com/gradio-app/gradio/pull/4491)\n- Fixes issue with Clear button not working for `Label` component by [@abidlabs](https://github.com/abidlabs) in [PR 4456](https://github.com/gradio-app/gradio/pull/4456)\n- Restores the ability to pass in a tuple (sample rate, audio array) to gr.Audio() by [@abidlabs](https://github.com/abidlabs) in [PR 4525](https://github.com/gradio-app/gradio/pull/4525)\n- Ensure code is correctly formatted and copy button is always present in Chatbot by [@pngwn](https://github.com/pngwn) in [PR 4527](https://github.com/gradio-app/gradio/pull/4527)\n- `show_label` will not automatically be set to `True` in `gr.BarPlot.update` by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4531](https://github.com/gradio-app/gradio/pull/4531)\n- `gr.BarPlot` group text now respects darkmode by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4531](https://github.com/gradio-app/gradio/pull/4531)\n- Fix dispatched errors from within components [@aliabid94](https://github.com/aliabid94) in [PR 4786](https://github.com/gradio-app/gradio/pull/4786)\n\n### Other Changes:\n\n- Change styling of status and toast error components by [@hannahblair](https://github.com/hannahblair) in [PR 4454](https://github.com/gradio-app/gradio/pull/4454).\n- Clean up unnecessary `new Promise()`s by [@akx](https://github.com/akx) in [PR 4442](https://github.com/gradio-app/gradio/pull/4442).\n- Minor UI cleanup for Examples and Dataframe components [@aliabid94](https://github.com/aliabid94) in [PR 4455](https://github.com/gradio-app/gradio/pull/4455).\n- Minor UI cleanup for Examples and Dataframe components [@aliabid94](https://github.com/aliabid94) in [PR 4455](https://github.com/gradio-app/gradio/pull/4455).\n- Add Catalan translation [@jordimas](https://github.com/jordimas) in [PR 4483](https://github.com/gradio-app/gradio/pull/4483).\n- The API endpoint that loads examples upon click has been given an explicit name (\"/load_examples\") by [@abidlabs](https://github.com/abidlabs) in [PR 4456](https://github.com/gradio-app/gradio/pull/4456).\n- Allows configuration of FastAPI app when calling `mount_gradio_app`, by [@charlesfrye](https://github.com/charlesfrye) in [PR4519](https://github.com/gradio-app/gradio/pull/4519).\n\n### Breaking Changes:\n\n- The behavior of the `Clear` button has been changed for `Slider`, `CheckboxGroup`, `Radio`, `Dropdown` components by [@abidlabs](https://github.com/abidlabs) in [PR 4456](https://github.com/gradio-app/gradio/pull/4456). The Clear button now sets the value of these components to be empty as opposed to the original default set by the developer. This is to make them in line with the rest of the Gradio components.\n- Python 3.7 end of life is June 27 2023. 
Gradio will no longer support python 3.7 by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4484](https://github.com/gradio-app/gradio/pull/4484)\n- Removed `$` as a default LaTeX delimiter for the `Chatbot` by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4516](https://github.com/gradio-app/gradio/pull/4516). The specific LaTeX delimeters can be set using the new `latex_delimiters` parameter in `Chatbot`.\n\n## 3.34.0\n\n### New Features:\n\n- The `gr.UploadButton` component now supports the `variant` and `interactive` parameters by [@abidlabs](https://github.com/abidlabs) in [PR 4436](https://github.com/gradio-app/gradio/pull/4436).\n\n### Bug Fixes:\n\n- Remove target=\"\\_blank\" override on anchor tags with internal targets by [@hannahblair](https://github.com/hannahblair) in [PR 4405](https://github.com/gradio-app/gradio/pull/4405)\n- Fixed bug where `gr.File(file_count='multiple')` could not be cached as output by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4421](https://github.com/gradio-app/gradio/pull/4421)\n- Restricts the domains that can be proxied via `/proxy` route by [@abidlabs](https://github.com/abidlabs) in [PR 4406](https://github.com/gradio-app/gradio/pull/4406).\n- Fixes issue where `gr.UploadButton` could not be used to upload the same file twice by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4437](https://github.com/gradio-app/gradio/pull/4437)\n- Fixes bug where `/proxy` route was being incorrectly constructed by the frontend by [@abidlabs](https://github.com/abidlabs) in [PR 4430](https://github.com/gradio-app/gradio/pull/4430).\n- Fix z-index of status component by [@hannahblair](https://github.com/hannahblair) in [PR 4429](https://github.com/gradio-app/gradio/pull/4429)\n- Fix video rendering in Safari by [@aliabid94](https://github.com/aliabid94) in [PR 4433](https://github.com/gradio-app/gradio/pull/4433).\n- The output directory for files downloaded when calling Blocks as a function is now set to a temporary directory by default (instead of the working directory in some cases) by [@abidlabs](https://github.com/abidlabs) in [PR 4501](https://github.com/gradio-app/gradio/pull/4501)\n\n### Other Changes:\n\n- When running on Spaces, handler functions will be transformed by the [PySpaces](https://pypi.org/project/spaces/) library in order to make them work with specific hardware. It will have no effect on standalone Gradio apps or regular Gradio Spaces and can be globally deactivated as follows : `import spaces; spaces.disable_gradio_auto_wrap()` by [@cbensimon](https://github.com/cbensimon) in [PR 4389](https://github.com/gradio-app/gradio/pull/4389).\n- Deprecated `.style` parameter and moved arguments to constructor. Added support for `.update()` to all arguments initially in style. Added `scale` and `min_width` support to every Component. 
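\n\nFor example, a rough sketch of the constructor-style arguments (the specific components and values here are illustrative only):\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        # options that previously lived in .style() are now plain constructor arguments\n        image = gr.Image(label=\"Input\", scale=2, min_width=320)\n        caption = gr.Textbox(label=\"Caption\", scale=1)\n\ndemo.launch()\n```\n\n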
By [@aliabid94](https://github.com/aliabid94) in [PR 4374](https://github.com/gradio-app/gradio/pull/4374)\n\n#\n\n## 3.33.1\n\n#\n\n### Bug Fixes:\n\n- Allow `every` to work with generators by [@dkjshk](https://github.com/dkjshk) in [PR 4434](https://github.com/gradio-app/gradio/pull/4434)\n- Fix z-index of status component by [@hannahblair](https://github.com/hannahblair) in [PR 4429](https://github.com/gradio-app/gradio/pull/4429)\n- Allow gradio to work offline, by [@aliabid94](https://github.com/aliabid94) in [PR 4398](https://github.com/gradio-app/gradio/pull/4398).\n- Fixed `validate_url` to check for 403 errors and use a GET request in place of a HEAD by [@alvindaiyan](https://github.com/alvindaiyan) in [PR 4388](https://github.com/gradio-app/gradio/pull/4388).\n\n### Other Changes:\n\n- More explicit error message when share link binary is blocked by antivirus by [@abidlabs](https://github.com/abidlabs) in [PR 4380](https://github.com/gradio-app/gradio/pull/4380).\n\n#\n\n## 3.33.0\n\n### New Features:\n\n- Introduced `gradio deploy` to launch a Gradio app to Spaces directly from your terminal. By [@aliabid94](https://github.com/aliabid94) in [PR 4033](https://github.com/gradio-app/gradio/pull/4033).\n- Introduce `show_progress='corner'` argument to event listeners, which will not cover the output components with the progress animation, but instead show it in the corner of the components. By [@aliabid94](https://github.com/aliabid94) in [PR 4396](https://github.com/gradio-app/gradio/pull/4396).\n\n### Bug Fixes:\n\n- Fix bug where Label change event was triggering itself by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4371](https://github.com/gradio-app/gradio/pull/4371)\n- Make `Blocks.load` behave like other event listeners (allows chaining `then` off of it) [@anentropic](https://github.com/anentropic/) in [PR 4304](https://github.com/gradio-app/gradio/pull/4304)\n- Respect `interactive=True` in output components of a `gr.Interface` by [@abidlabs](https://github.com/abidlabs) in [PR 4356](https://github.com/gradio-app/gradio/pull/4356).\n- Remove unused frontend code by [@akx](https://github.com/akx) in [PR 4275](https://github.com/gradio-app/gradio/pull/4275)\n- Fixes favicon path on Windows by [@abidlabs](https://github.com/abidlabs) in [PR 4369](https://github.com/gradio-app/gradio/pull/4369).\n- Prevent path traversal in `/file` routes by [@abidlabs](https://github.com/abidlabs) in [PR 4370](https://github.com/gradio-app/gradio/pull/4370).\n- Do not send HF token to other domains via `/proxy` route by [@abidlabs](https://github.com/abidlabs) in [PR 4368](https://github.com/gradio-app/gradio/pull/4368).\n- Replace default `markedjs` sanitize function with DOMPurify sanitizer for `gr.Chatbot()` by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4360](https://github.com/gradio-app/gradio/pull/4360)\n- Prevent the creation of duplicate copy buttons in the chatbot and ensure copy buttons work in non-secure contexts by [@binary-husky](https://github.com/binary-husky) in [PR 4350](https://github.com/gradio-app/gradio/pull/4350).\n\n### Other Changes:\n\n- Remove flicker of loading bar by adding opacity transition, by [@aliabid94](https://github.com/aliabid94) in [PR 4349](https://github.com/gradio-app/gradio/pull/4349).\n- Performance optimization in the frontend's Blocks code by [@akx](https://github.com/akx) in [PR 4334](https://github.com/gradio-app/gradio/pull/4334)\n- Upgrade the pnpm lock file format version from v6.0 to v6.1 by 
[@whitphx](https://github.com/whitphx) in [PR 4393](https://github.com/gradio-app/gradio/pull/4393)\n\n### Breaking Changes:\n\n- The `/file=` route no longer allows accessing dotfiles or files in \"dot directories\" by [@akx](https://github.com/akx) in [PR 4303](https://github.com/gradio-app/gradio/pull/4303)\n\n## 3.32.0\n\n### New Features:\n\n- `Interface.launch()` and `Blocks.launch()` now accept an `app_kwargs` argument to allow customizing the configuration of the underlying FastAPI app, by [@akx](https://github.com/akx) in [PR 4282](https://github.com/gradio-app/gradio/pull/4282)\n\n### Bug Fixes:\n\n- Fixed Gallery/AnnotatedImage components not respecting GRADIO_DEFAULT_DIR variable by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4256](https://github.com/gradio-app/gradio/pull/4256)\n- Fixed Gallery/AnnotatedImage components resaving identical images by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4256](https://github.com/gradio-app/gradio/pull/4256)\n- Fixed Audio/Video/File components creating empty tempfiles on each run by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4256](https://github.com/gradio-app/gradio/pull/4256)\n- Fixed the behavior of the `run_on_click` parameter in `gr.Examples` by [@abidlabs](https://github.com/abidlabs) in [PR 4258](https://github.com/gradio-app/gradio/pull/4258).\n- Ensure error modal displays when the queue is enabled by [@pngwn](https://github.com/pngwn) in [PR 4273](https://github.com/gradio-app/gradio/pull/4273)\n- Ensure js client respcts the full root when making requests to the server by [@pngwn](https://github.com/pngwn) in [PR 4271](https://github.com/gradio-app/gradio/pull/4271)\n\n### Other Changes:\n\n- Refactor web component `initial_height` attribute by [@whitphx](https://github.com/whitphx) in [PR 4223](https://github.com/gradio-app/gradio/pull/4223)\n- Relocate `mount_css` fn to remove circular dependency [@whitphx](https://github.com/whitphx) in [PR 4222](https://github.com/gradio-app/gradio/pull/4222)\n- Upgrade Black to 23.3 by [@akx](https://github.com/akx) in [PR 4259](https://github.com/gradio-app/gradio/pull/4259)\n- Add frontend LaTeX support in `gr.Chatbot()` using `KaTeX` by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4285](https://github.com/gradio-app/gradio/pull/4285).\n\n#\n\n## 3.31.0\n\n### New Features:\n\n- The reloader command (`gradio app.py`) can now accept command line arguments by [@micky2be](https://github.com/micky2be) in [PR 4119](https://github.com/gradio-app/gradio/pull/4119)\n- Added `format` argument to `Audio` component by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4178](https://github.com/gradio-app/gradio/pull/4178)\n- Add JS client code snippets to use via api page by [@aliabd](https://github.com/aliabd) in [PR 3927](https://github.com/gradio-app/gradio/pull/3927).\n- Update to the JS client by [@pngwn](https://github.com/pngwn) in [PR 4202](https://github.com/gradio-app/gradio/pull/4202)\n\n### Bug Fixes:\n\n- Fix \"TypeError: issubclass() arg 1 must be a class\" When use Optional[Types] by [@lingfengchencn](https://github.com/lingfengchencn) in [PR 4200](https://github.com/gradio-app/gradio/pull/4200).\n- Gradio will no longer send any analytics or call home if analytics are disabled with the GRADIO_ANALYTICS_ENABLED environment variable. 
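\n\nA minimal sketch of opting out with that variable, assuming it is set to \"False\" before `gradio` is imported:\n\n```python\nimport os\n\n# disable analytics/telemetry for this process\nos.environ[\"GRADIO_ANALYTICS_ENABLED\"] = \"False\"\n\nimport gradio as gr\n\ngr.Interface(lambda x: x, \"text\", \"text\").launch()\n```\n\n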
By [@akx](https://github.com/akx) in [PR 4194](https://github.com/gradio-app/gradio/pull/4194) and [PR 4236](https://github.com/gradio-app/gradio/pull/4236)\n- The deprecation warnings for kwargs now show the actual stack level for the invocation, by [@akx](https://github.com/akx) in [PR 4203](https://github.com/gradio-app/gradio/pull/4203).\n- Fix \"TypeError: issubclass() arg 1 must be a class\" When use Optional[Types] by [@lingfengchencn](https://github.com/lingfengchencn) in [PR 4200](https://github.com/gradio-app/gradio/pull/4200).\n- Ensure cancelling functions work correctly by [@pngwn](https://github.com/pngwn) in [PR 4225](https://github.com/gradio-app/gradio/pull/4225)\n- Fixes a bug with typing.get_type_hints() on Python 3.9 by [@abidlabs](https://github.com/abidlabs) in [PR 4228](https://github.com/gradio-app/gradio/pull/4228).\n- Fixes JSONDecodeError by [@davidai](https://github.com/davidai) in [PR 4241](https://github.com/gradio-app/gradio/pull/4241)\n- Fix `chatbot_dialogpt` demo by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4238](https://github.com/gradio-app/gradio/pull/4238).\n\n### Other Changes:\n\n- Change `gr.Chatbot()` markdown parsing to frontend using `marked` library and `prism` by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4150](https://github.com/gradio-app/gradio/pull/4150)\n- Update the js client by [@pngwn](https://github.com/pngwn) in [PR 3899](https://github.com/gradio-app/gradio/pull/3899)\n- Fix documentation for the shape of the numpy array produced by the `Image` component by [@der3318](https://github.com/der3318) in [PR 4204](https://github.com/gradio-app/gradio/pull/4204).\n- Updates the timeout for websocket messaging from 1 second to 5 seconds by [@abidlabs](https://github.com/abidlabs) in [PR 4235](https://github.com/gradio-app/gradio/pull/4235)\n\n#\n\n## 3.30.0\n\n### New Features:\n\n- Adds a `root_path` parameter to `launch()` that allows running Gradio applications on subpaths (e.g. 
www.example.com/app) behind a proxy, by [@abidlabs](https://github.com/abidlabs) in [PR 4133](https://github.com/gradio-app/gradio/pull/4133)\n- Fix dropdown change listener to trigger on change when updated as an output by [@aliabid94](https://github.com/aliabid94) in [PR 4128](https://github.com/gradio-app/gradio/pull/4128).\n- Add `.input` event listener, which is only triggered when a user changes the component value (as compared to `.change`, which is also triggered when a component updates as the result of a function trigger), by [@aliabid94](https://github.com/aliabid94) in [PR 4157](https://github.com/gradio-app/gradio/pull/4157).\n\n### Bug Fixes:\n\n- Records username when flagging by [@abidlabs](https://github.com/abidlabs) in [PR 4135](https://github.com/gradio-app/gradio/pull/4135)\n- Fix website build issue by [@aliabd](https://github.com/aliabd) in [PR 4142](https://github.com/gradio-app/gradio/pull/4142)\n- Fix lang agnostic type info for `gr.File(file_count='multiple')` output components by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4153](https://github.com/gradio-app/gradio/pull/4153)\n\n#\n\n#\n\n## 3.29.0\n\n### New Features:\n\n- Returning language agnostic types in the `/info` route by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4039](https://github.com/gradio-app/gradio/pull/4039)\n\n### Bug Fixes:\n\n- Allow users to upload audio files in Audio component on iOS by by [@aliabid94](https://github.com/aliabid94) in [PR 4071](https://github.com/gradio-app/gradio/pull/4071).\n- Fixes the gradio theme builder error that appeared on launch by [@aliabid94](https://github.com/aliabid94) and [@abidlabs](https://github.com/abidlabs) in [PR 4080](https://github.com/gradio-app/gradio/pull/4080)\n- Keep Accordion content in DOM by [@aliabid94](https://github.com/aliabid94) in [PR 4070](https://github.com/gradio-app/gradio/pull/4073)\n- Fixed bug where type hints in functions caused the event handler to crash by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4068](https://github.com/gradio-app/gradio/pull/4068)\n- Fix dropdown default value not appearing by [@aliabid94](https://github.com/aliabid94) in [PR 4072](https://github.com/gradio-app/gradio/pull/4072).\n- Soft theme label color fix by [@aliabid94](https://github.com/aliabid94) in [PR 4070](https://github.com/gradio-app/gradio/pull/4070)\n- Fix `gr.Slider` `release` event not triggering on mobile by [@space-nuko](https://github.com/space-nuko) in [PR 4098](https://github.com/gradio-app/gradio/pull/4098)\n- Removes extraneous `State` component info from the `/info` route by [@abidlabs](https://github.com/freddyaboulton) in [PR 4107](https://github.com/gradio-app/gradio/pull/4107)\n- Make .then() work even if first event fails by [@aliabid94](https://github.com/aliabid94) in [PR 4115](https://github.com/gradio-app/gradio/pull/4115).\n\n#\n\n#\n\n#\n\n### Full Changelog:\n\n- Allow users to submit with enter in Interfaces with textbox / number inputs [@aliabid94](https://github.com/aliabid94) in [PR 4090](https://github.com/gradio-app/gradio/pull/4090).\n- Updates gradio's requirements.txt to requires uvicorn>=0.14.0 by [@abidlabs](https://github.com/abidlabs) in [PR 4086](https://github.com/gradio-app/gradio/pull/4086)\n- Updates some error messaging by [@abidlabs](https://github.com/abidlabs) in [PR 4086](https://github.com/gradio-app/gradio/pull/4086)\n- Renames simplified Chinese translation file from `zh-cn.json` to `zh-CN.json` by [@abidlabs](https://github.com/abidlabs) in 
[PR 4086](https://github.com/gradio-app/gradio/pull/4086)\n\n#\n\n## 3.28.3\n\n#\n\n### Bug Fixes:\n\n- Fixes issue with indentation in `gr.Code()` component with streaming by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4043](https://github.com/gradio-app/gradio/pull/4043)\n\n#\n\n#\n\n#\n\n#\n\n#\n\n## 3.28.2\n\n### Bug Fixes\n\n- Code component visual updates by [@pngwn](https://github.com/pngwn) in [PR 4051](https://github.com/gradio-app/gradio/pull/4051)\n\n### New Features:\n\n- Add support for `visual-question-answering`, `document-question-answering`, and `image-to-text` using `gr.Interface.load(\"models/...\")` and `gr.Interface.from_pipeline` by [@osanseviero](https://github.com/osanseviero) in [PR 3887](https://github.com/gradio-app/gradio/pull/3887)\n- Add code block support in `gr.Chatbot()`, by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 4048](https://github.com/gradio-app/gradio/pull/4048)\n- Adds the ability to blocklist filepaths (and also improves the allowlist mechanism) by [@abidlabs](https://github.com/abidlabs) in [PR 4047](https://github.com/gradio-app/gradio/pull/4047).\n- Adds the ability to specify the upload directory via an environment variable by [@abidlabs](https://github.com/abidlabs) in [PR 4047](https://github.com/gradio-app/gradio/pull/4047).\n\n### Bug Fixes:\n\n- Fixes issue with `matplotlib` not rendering correctly if the backend was not set to `Agg` by [@abidlabs](https://github.com/abidlabs) in [PR 4029](https://github.com/gradio-app/gradio/pull/4029)\n- Fixes bug where rendering the same `gr.State` across different Interfaces/Blocks within larger Blocks would not work by [@abidlabs](https://github.com/abidlabs) in [PR 4030](https://github.com/gradio-app/gradio/pull/4030)\n- Code component visual updates by [@pngwn](https://github.com/pngwn) in [PR 4051](https://github.com/gradio-app/gradio/pull/4051)\n\n### Documentation Changes:\n\n- Adds a Guide on how to use the Python Client within a FastAPI app, by [@abidlabs](https://github.com/abidlabs) in [PR 3892](https://github.com/gradio-app/gradio/pull/3892)\n\n#\n\n### Breaking Changes:\n\n- `gr.HuggingFaceDatasetSaver` behavior changed internally. The `flagging/` folder is not a `.git/` folder anymore when using it. `organization` parameter is now ignored in favor of passing a full dataset id as `dataset_name` (e.g. `\"username/my-dataset\"`).\n- New lines (`\\n`) are not automatically converted to `
<br>` in `gr.Markdown()` or `gr.Chatbot()`. For multiple new lines, a developer must add multiple `<br>
` tags.\n\n### Full Changelog:\n\n- Safer version of `gr.HuggingFaceDatasetSaver` using HTTP methods instead of git pull/push by [@Wauplin](https://github.com/Wauplin) in [PR 3973](https://github.com/gradio-app/gradio/pull/3973)\n\n#\n\n## 3.28.1\n\n### New Features:\n\n- Add a \"clear mask\" button to `gr.Image` sketch modes, by [@space-nuko](https://github.com/space-nuko) in [PR 3615](https://github.com/gradio-app/gradio/pull/3615)\n\n### Bug Fixes:\n\n- Fix dropdown default value not appearing by [@aliabid94](https://github.com/aliabid94) in [PR 3996](https://github.com/gradio-app/gradio/pull/3996).\n- Fix faded coloring of output textboxes in iOS / Safari by [@aliabid94](https://github.com/aliabid94) in [PR 3993](https://github.com/gradio-app/gradio/pull/3993)\n\n#\n\n### Testing and Infrastructure Changes:\n\n- CI: Simplified Python CI workflow by [@akx](https://github.com/akx) in [PR 3982](https://github.com/gradio-app/gradio/pull/3982)\n- Upgrade pyright to 1.1.305 by [@akx](https://github.com/akx) in [PR 4042](https://github.com/gradio-app/gradio/pull/4042)\n- More Ruff rules are enabled and lint errors fixed by [@akx](https://github.com/akx) in [PR 4038](https://github.com/gradio-app/gradio/pull/4038)\n\n#\n\n#\n\n#\n\n## 3.28.0\n\n### Bug Fixes:\n\n- Fix duplicate play commands in full-screen mode of 'video'. by [@tomchang25](https://github.com/tomchang25) in [PR 3968](https://github.com/gradio-app/gradio/pull/3968).\n- Fix the issue of the UI stuck caused by the 'selected' of DataFrame not being reset. by [@tomchang25](https://github.com/tomchang25) in [PR 3916](https://github.com/gradio-app/gradio/pull/3916).\n- Fix issue where `gr.Video()` would not work inside a `gr.Tab()` by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3891](https://github.com/gradio-app/gradio/pull/3891)\n- Fixed issue with old_value check in File. 
by [@tomchang25](https://github.com/tomchang25) in [PR 3859](https://github.com/gradio-app/gradio/pull/3859).\n- Fixed bug where all bokeh plots appeared in the same div by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3896](https://github.com/gradio-app/gradio/pull/3896)\n- Fixed image outputs to automatically take full output image height, unless explicitly set, by [@aliabid94](https://github.com/aliabid94) in [PR 3905](https://github.com/gradio-app/gradio/pull/3905)\n- Fix issue in `gr.Gallery()` where setting height causes aspect ratio of images to collapse by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3830](https://github.com/gradio-app/gradio/pull/3830)\n- Fix issue where requesting for a non-existing file would trigger a 500 error by [@micky2be](https://github.com/micky2be) in `[PR 3895](https://github.com/gradio-app/gradio/pull/3895)`.\n- Fix bugs with abspath about symlinks, and unresolvable path on Windows by [@micky2be](https://github.com/micky2be) in `[PR 3895](https://github.com/gradio-app/gradio/pull/3895)`.\n- Fixes type in client `Status` enum by [@10zinten](https://github.com/10zinten) in [PR 3931](https://github.com/gradio-app/gradio/pull/3931)\n- Fix `gr.ChatBot` to handle image url [tye-singwa](https://github.com/tye-signwa) in [PR 3953](https://github.com/gradio-app/gradio/pull/3953)\n- Move Google Tag Manager related initialization code to analytics-enabled block by [@akx](https://github.com/akx) in [PR 3956](https://github.com/gradio-app/gradio/pull/3956)\n- Fix bug where port was not reused if the demo was closed and then re-launched by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3896](https://github.com/gradio-app/gradio/pull/3959)\n- Fixes issue where dropdown does not position itself at selected element when opened [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3639](https://github.com/gradio-app/gradio/pull/3639)\n\n### Documentation Changes:\n\n- Make use of `gr` consistent across the docs by [@duerrsimon](https://github.com/duerrsimon) in [PR 3901](https://github.com/gradio-app/gradio/pull/3901)\n- Fixed typo in theming-guide.md by [@eltociear](https://github.com/eltociear) in [PR 3952](https://github.com/gradio-app/gradio/pull/3952)\n\n### Testing and Infrastructure Changes:\n\n- CI: Python backend lint is only run once, by [@akx](https://github.com/akx) in [PR 3960](https://github.com/gradio-app/gradio/pull/3960)\n- Format invocations and concatenations were replaced by f-strings where possible by [@akx](https://github.com/akx) in [PR 3984](https://github.com/gradio-app/gradio/pull/3984)\n- Linting rules were made more strict and issues fixed by [@akx](https://github.com/akx) in [PR 3979](https://github.com/gradio-app/gradio/pull/3979).\n\n### Breaking Changes:\n\n- Some re-exports in `gradio.themes` utilities (introduced in 3.24.0) have been eradicated.\n By [@akx](https://github.com/akx) in [PR 3958](https://github.com/gradio-app/gradio/pull/3958)\n\n### Full Changelog:\n\n- Add DESCRIPTION.md to image_segmentation demo by [@aliabd](https://github.com/aliabd) in [PR 3866](https://github.com/gradio-app/gradio/pull/3866)\n- Fix error in running `gr.themes.builder()` by [@deepkyu](https://github.com/deepkyu) in [PR 3869](https://github.com/gradio-app/gradio/pull/3869)\n- Fixed a JavaScript TypeError when loading custom JS with `_js` and setting `outputs` to `None` in `gradio.Blocks()` by [@DavG25](https://github.com/DavG25) in [PR 3883](https://github.com/gradio-app/gradio/pull/3883)\n- Fixed 
bg_background_fill theme property to expand to whole background, block_radius to affect form elements as well, and added block_label_shadow theme property by [@aliabid94](https://github.com/aliabid94) in [PR 3590](https://github.com/gradio-app/gradio/pull/3590)\n\n#\n\n## 3.27.0\n\n### New Features:\n\n###### AnnotatedImage Component\n\nNew AnnotatedImage component allows users to highlight regions of an image, either by providing bounding boxes, or 0-1 pixel masks. This component is useful for tasks such as image segmentation, object detection, and image captioning.\n\n![AnnotatedImage screenshot](https://user-images.githubusercontent.com/7870876/232142720-86e0020f-beaf-47b9-a843-689c9621f09c.gif)\n\nExample usage:\n\n```python\nwith gr.Blocks() as demo:\n img = gr.Image()\n img_section = gr.AnnotatedImage()\n def mask(img):\n top_left_corner = [0, 0, 20, 20]\n random_mask = np.random.randint(0, 2, img.shape[:2])\n return (img, [(top_left_corner, \"left corner\"), (random_mask, \"random\")])\n img.change(mask, img, img_section)\n```\n\nSee the [image_segmentation demo](https://github.com/gradio-app/gradio/tree/main/demo/image_segmentation) for a full example. By [@aliabid94](https://github.com/aliabid94) in [PR 3836](https://github.com/gradio-app/gradio/pull/3836)\n\n#\n\n#\n\n#\n\n#\n\n#\n\n#\n\n## 3.26.0\n\n### New Features:\n\n###### `Video` component supports subtitles\n\n- Allow the video component to accept subtitles as input, by [@tomchang25](https://github.com/tomchang25) in [PR 3673](https://github.com/gradio-app/gradio/pull/3673). To provide subtitles, simply return a tuple consisting of `(path_to_video, path_to_subtitles)` from your function. Both `.srt` and `.vtt` formats are supported:\n\n```py\nwith gr.Blocks() as demo:\n gr.Video((\"video.mp4\", \"captions.srt\"))\n```\n\n### Bug Fixes:\n\n- Fix code markdown support in `gr.Chatbot()` component by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3816](https://github.com/gradio-app/gradio/pull/3816)\n\n### Documentation Changes:\n\n- Updates the \"view API\" page in Gradio apps to use the `gradio_client` library by [@aliabd](https://github.com/aliabd) in [PR 3765](https://github.com/gradio-app/gradio/pull/3765)\n\n- Read more about how to use the `gradio_client` library here: https://gradio.app/getting-started-with-the-python-client/\n\n#\n\n#\n\n#\n\n#\n\n## 3.25.0\n\n### New Features:\n\n- Improve error messages when number of inputs/outputs to event handlers mismatch, by [@space-nuko](https://github.com/space-nuko) in [PR 3519](https://github.com/gradio-app/gradio/pull/3519)\n\n- Add `select` listener to Images, allowing users to click on any part of an image and get the coordinates of the click by [@aliabid94](https://github.com/aliabid94) in [PR 3786](https://github.com/gradio-app/gradio/pull/3786).\n\n```python\nwith gr.Blocks() as demo:\n img = gr.Image()\n textbox = gr.Textbox()\n\n def select_handler(img, evt: gr.SelectData):\n selected_pixel = img[evt.index[1], evt.index[0]]\n return f\"Selected pixel: {selected_pixel}\"\n\n img.select(select_handler, img, textbox)\n```\n\n![Recording 2023-04-08 at 17 44 39](https://user-images.githubusercontent.com/7870876/230748572-90a2a8d5-116d-4769-bb53-5516555fbd0f.gif)\n\n### Bug Fixes:\n\n- Increase timeout for sending analytics data by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3647](https://github.com/gradio-app/gradio/pull/3647)\n- Fix bug where http token was not accessed over websocket connections by 
[@freddyaboulton](https://github.com/freddyaboulton) in [PR 3735](https://github.com/gradio-app/gradio/pull/3735)\n- Add ability to specify `rows`, `columns` and `object-fit` in `style()` for `gr.Gallery()` component by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3586](https://github.com/gradio-app/gradio/pull/3586)\n- Fix bug where recording an audio file through the microphone resulted in a corrupted file name by [@abidlabs](https://github.com/abidlabs) in [PR 3770](https://github.com/gradio-app/gradio/pull/3770)\n- Added \"ssl_verify\" to blocks.launch method to allow for use of self-signed certs by [@garrettsutula](https://github.com/garrettsutula) in [PR 3873](https://github.com/gradio-app/gradio/pull/3873)\n- Fix bug where iterators where not being reset for processes that terminated early by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3777](https://github.com/gradio-app/gradio/pull/3777)\n- Fix bug where the upload button was not properly handling the `file_count='multiple'` case by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3782](https://github.com/gradio-app/gradio/pull/3782)\n- Fix bug where use Via API button was giving error by [@Devang-C](https://github.com/Devang-C) in [PR 3783](https://github.com/gradio-app/gradio/pull/3783)\n\n### Documentation Changes:\n\n- Fix invalid argument docstrings, by [@akx](https://github.com/akx) in [PR 3740](https://github.com/gradio-app/gradio/pull/3740)\n\n#\n\n#\n\n### Full Changelog:\n\n- Fixed IPv6 listening to work with bracket [::1] notation, by [@dsully](https://github.com/dsully) in [PR 3695](https://github.com/gradio-app/gradio/pull/3695)\n\n#\n\n## 3.24.1\n\n### New Features:\n\n- No changes to highlight.\n\n### Bug Fixes:\n\n- Fixes Chatbot issue where new lines were being created every time a message was sent back and forth by [@aliabid94](https://github.com/aliabid94) in [PR 3717](https://github.com/gradio-app/gradio/pull/3717).\n- Fixes data updating in DataFrame invoking a `select` event once the dataframe has been selected. By [@yiyuezhuo](https://github.com/yiyuezhuo) in [PR 3861](https://github.com/gradio-app/gradio/pull/3861)\n- Fixes false positive warning which is due to too strict type checking by [@yiyuezhuo](https://github.com/yiyuezhuo) in [PR 3837](https://github.com/gradio-app/gradio/pull/3837).\n\n#\n\n#\n\n#\n\n#\n\n#\n\n## 3.24.0\n\n### New Features:\n\n- Trigger the release event when Slider number input is released or unfocused by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3589](https://github.com/gradio-app/gradio/pull/3589)\n- Created Theme Builder, which allows users to create themes without writing any code, by [@aliabid94](https://github.com/aliabid94) in [PR 3664](https://github.com/gradio-app/gradio/pull/3664). Launch by:\n\n ```python\n import gradio as gr\n gr.themes.builder()\n ```\n\n ![Theme Builder](https://user-images.githubusercontent.com/7870876/228204929-d71cbba5-69c2-45b3-bd20-e3a201d98b12.png)\n\n- The `Dropdown` component now has a `allow_custom_value` parameter that lets users type in custom values not in the original list of choices.\n- The `Colorpicker` component now has a `.blur()` event\n\n###### Added a download button for videos! 
\ud83d\udce5\n\n![download_video](https://user-images.githubusercontent.com/41651716/227009612-9bc5fb72-2a44-4c55-9b7b-a0fa098e7f25.gif)\n\nBy [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3581](https://github.com/gradio-app/gradio/pull/3581).\n\n- Trigger the release event when Slider number input is released or unfocused by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3589](https://github.com/gradio-app/gradio/pull/3589)\n\n### Bug Fixes:\n\n- Fixed bug where text for altair plots was not legible in dark mode by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3555](https://github.com/gradio-app/gradio/pull/3555)\n- Fixes `Chatbot` and `Image` components so that files passed during processing are added to a directory where they can be served from, by [@abidlabs](https://github.com/abidlabs) in [PR 3523](https://github.com/gradio-app/gradio/pull/3523)\n- Use Gradio API server to send telemetry using `huggingface_hub` [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3488](https://github.com/gradio-app/gradio/pull/3488)\n- Fixes an an issue where if the Blocks scope was not exited, then State could be shared across sessions, by [@abidlabs](https://github.com/abidlabs) in [PR 3600](https://github.com/gradio-app/gradio/pull/3600)\n- Ensures that `gr.load()` loads and applies the upstream theme, by [@abidlabs](https://github.com/abidlabs) in [PR 3641](https://github.com/gradio-app/gradio/pull/3641)\n- Fixed bug where \"or\" was not being localized in file upload text by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3599](https://github.com/gradio-app/gradio/pull/3599)\n- Fixed bug where chatbot does not autoscroll inside of a tab, row or column by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3637](https://github.com/gradio-app/gradio/pull/3637)\n- Fixed bug where textbox shrinks when `lines` set to larger than 20 by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3637](https://github.com/gradio-app/gradio/pull/3637)\n- Ensure CSS has fully loaded before rendering the application, by [@pngwn](https://github.com/pngwn) in [PR 3573](https://github.com/gradio-app/gradio/pull/3573)\n- Support using an empty list as `gr.Dataframe` value, by [@space-nuko](https://github.com/space-nuko) in [PR 3646](https://github.com/gradio-app/gradio/pull/3646)\n- Fixed `gr.Image` not filling the entire element size, by [@space-nuko](https://github.com/space-nuko) in [PR 3649](https://github.com/gradio-app/gradio/pull/3649)\n- Make `gr.Code` support the `lines` property, by [@space-nuko](https://github.com/space-nuko) in [PR 3651](https://github.com/gradio-app/gradio/pull/3651)\n- Fixes certain `_js` return values being double wrapped in an array, by [@space-nuko](https://github.com/space-nuko) in [PR 3594](https://github.com/gradio-app/gradio/pull/3594)\n- Correct the documentation of `gr.File` component to state that its preprocessing method converts the uploaded file to a temporary file, by @RussellLuo in [PR 3660](https://github.com/gradio-app/gradio/pull/3660)\n- Fixed bug in Serializer ValueError text by [@osanseviero](https://github.com/osanseviero) in [PR 3669](https://github.com/gradio-app/gradio/pull/3669)\n- Fix default parameter argument and `gr.Progress` used in same function, by [@space-nuko](https://github.com/space-nuko) in [PR 3671](https://github.com/gradio-app/gradio/pull/3671)\n- Hide `Remove All` button in `gr.Dropdown` single-select mode by [@space-nuko](https://github.com/space-nuko) in [PR 
3678](https://github.com/gradio-app/gradio/pull/3678)\n- Fix broken spaces in docs by [@aliabd](https://github.com/aliabd) in [PR 3698](https://github.com/gradio-app/gradio/pull/3698)\n- Fix items in `gr.Dropdown` besides the selected item receiving a checkmark, by [@space-nuko](https://github.com/space-nuko) in [PR 3644](https://github.com/gradio-app/gradio/pull/3644)\n- Fix several `gr.Dropdown` issues and improve usability, by [@space-nuko](https://github.com/space-nuko) in [PR 3705](https://github.com/gradio-app/gradio/pull/3705)\n\n### Documentation Changes:\n\n- Makes some fixes to the Theme Guide related to naming of variables, by [@abidlabs](https://github.com/abidlabs) in [PR 3561](https://github.com/gradio-app/gradio/pull/3561)\n- Documented `HuggingFaceDatasetJSONSaver` by [@osanseviero](https://github.com/osanseviero) in [PR 3604](https://github.com/gradio-app/gradio/pull/3604)\n- Makes some additions to documentation of `Audio` and `State` components, and fixes the `pictionary` demo by [@abidlabs](https://github.com/abidlabs) in [PR 3611](https://github.com/gradio-app/gradio/pull/3611)\n- Fix outdated sharing your app guide by [@aliabd](https://github.com/aliabd) in [PR 3699](https://github.com/gradio-app/gradio/pull/3699)\n\n### Testing and Infrastructure Changes:\n\n- Removed heavily-mocked tests related to comet_ml, wandb, and mlflow as they added a significant amount of test dependencies that prevented installation of test dependencies on Windows environments. By [@abidlabs](https://github.com/abidlabs) in [PR 3608](https://github.com/gradio-app/gradio/pull/3608)\n- Added Windows continuous integration, by [@space-nuko](https://github.com/space-nuko) in [PR 3628](https://github.com/gradio-app/gradio/pull/3628)\n- Switched linting from flake8 + isort to `ruff`, by [@akx](https://github.com/akx) in [PR 3710](https://github.com/gradio-app/gradio/pull/3710)\n\n#\n\n### Full Changelog:\n\n- Mobile responsive iframes in themes guide by [@aliabd](https://github.com/aliabd) in [PR 3562](https://github.com/gradio-app/gradio/pull/3562)\n- Remove extra $demo from theme guide by [@aliabd](https://github.com/aliabd) in [PR 3563](https://github.com/gradio-app/gradio/pull/3563)\n- Set the theme name to be the upstream repo name when loading from the hub by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3595](https://github.com/gradio-app/gradio/pull/3595)\n- Copy everything in website Dockerfile, fix build issues by [@aliabd](https://github.com/aliabd) in [PR 3659](https://github.com/gradio-app/gradio/pull/3659)\n- Raise error when an event is queued but the queue is not configured by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3640](https://github.com/gradio-app/gradio/pull/3640)\n- Allows users to apss in a string name for a built-in theme, by [@abidlabs](https://github.com/abidlabs) in [PR 3641](https://github.com/gradio-app/gradio/pull/3641)\n- Added `orig_name` to Video output in the backend so that the front end can set the right name for downloaded video files by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3700](https://github.com/gradio-app/gradio/pull/3700)\n\n#\n\n## 3.23.0\n\n### New Features:\n\n###### Theme Sharing!\n\nOnce you have created a theme, you can upload it to the HuggingFace Hub to let others view it, use it, and build off of it! You can also download, reuse, and remix other peoples' themes. 
See https://gradio.app/theming-guide/ for more details.\n\nBy [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3428](https://github.com/gradio-app/gradio/pull/3428)\n\n### Bug Fixes:\n\n- Removes leading spaces from all lines of code uniformly in the `gr.Code()` component. By [@abidlabs](https://github.com/abidlabs) in [PR 3556](https://github.com/gradio-app/gradio/pull/3556)\n- Fixed broken login page, by [@aliabid94](https://github.com/aliabid94) in [PR 3529](https://github.com/gradio-app/gradio/pull/3529)\n\n#\n\n#\n\n#\n\n### Full Changelog:\n\n- Fix rendering of dropdowns to take more space, and related bugs, by [@aliabid94](https://github.com/aliabid94) in [PR 3549](https://github.com/gradio-app/gradio/pull/3549)\n\n#\n\n## 3.22.1\n\n#\n\n### Bug Fixes:\n\n- Restore label bars by [@aliabid94](https://github.com/aliabid94) in [PR 3507](https://github.com/gradio-app/gradio/pull/3507)\n\n#\n\n#\n\n#\n\n#\n\n#\n\n## 3.22.0\n\n### New Features:\n\n###### Official Theme release\n\nGradio now supports a new theme system, which allows you to customize the look and feel of your app. You can now use the `theme=` kwarg to pass in a prebuilt theme, or customize your own! See https://gradio.app/theming-guide/ for more details. By [@aliabid94](https://github.com/aliabid94) in [PR 3470](https://github.com/gradio-app/gradio/pull/3470) and [PR 3497](https://github.com/gradio-app/gradio/pull/3497)\n\n###### `elem_classes`\n\nAdd keyword argument `elem_classes` to Components to control class names of components, in the same manner as existing `elem_id`.\nBy [@aliabid94](https://github.com/aliabid94) in [PR 3466](https://github.com/gradio-app/gradio/pull/3466)\n\n### Bug Fixes:\n\n- Fixes the File.upload() event trigger which broke as part of the change in how we uploaded files by [@abidlabs](https://github.com/abidlabs) in [PR 3462](https://github.com/gradio-app/gradio/pull/3462)\n- Fixed issue with `gr.Request` object failing to handle dictionaries when nested keys couldn't be converted to variable names [#3454](https://github.com/gradio-app/gradio/issues/3454) by [@radames](https://github.com/radames) in [PR 3459](https://github.com/gradio-app/gradio/pull/3459)\n- Fixed bug where css and client api was not working properly when mounted in a subpath by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3482](https://github.com/gradio-app/gradio/pull/3482)\n\n### Documentation Changes:\n\n- Document gr.Error in the docs by [@aliabd](https://github.com/aliabd) in [PR 3465](https://github.com/gradio-app/gradio/pull/3465)\n\n### Testing and Infrastructure Changes:\n\n- Pinned `pyright==1.1.298` for stability by [@abidlabs](https://github.com/abidlabs) in [PR 3475](https://github.com/gradio-app/gradio/pull/3475)\n- Removed `IOComponent.add_interactive_to_config()` by [@space-nuko](https://github.com/space-nuko) in [PR 3476](https://github.com/gradio-app/gradio/pull/3476)\n- Removed `IOComponent.generate_sample()` by [@space-nuko](https://github.com/space-nuko) in [PR 3475](https://github.com/gradio-app/gradio/pull/3483)\n\n#\n\n### Full Changelog:\n\n- Revert primary button background color in dark mode by [@aliabid94](https://github.com/aliabid94) in [PR 3468](https://github.com/gradio-app/gradio/pull/3468)\n\n#\n\n## 3.21.0\n\n### New Features:\n\n###### Theme Sharing \ud83c\udfa8 \ud83e\udd1d\n\nYou can now share your gradio themes with the world!\n\nAfter creating a theme, you can upload it to the HuggingFace Hub to let others view it, use it, and build off of it!\n\n###### 
Uploading\n\nThere are two ways to upload a theme, via the theme class instance or the command line.\n\n1. Via the class instance\n\n```python\nmy_theme.push_to_hub(repo_name=\"my_theme\",\n version=\"0.2.0\",\n hf_token=\"...\")\n```\n\n2. Via the command line\n\nFirst save the theme to disk\n\n```python\nmy_theme.dump(filename=\"my_theme.json\")\n```\n\nThen use the `upload_theme` command:\n\n```bash\nupload_theme\\\n\"my_theme.json\"\\\n\"my_theme\"\\\n\"0.2.0\"\\\n\"\"\n```\n\nThe `version` must be a valid [semantic version](https://www.geeksforgeeks.org/introduction-semantic-versioning/) string.\n\nThis creates a space on the huggingface hub to host the theme files and show potential users a preview of your theme.\n\nAn example theme space is here: https://huggingface.co/spaces/freddyaboulton/dracula_revamped\n\n###### Downloading\n\nTo use a theme from the hub, use the `from_hub` method on the `ThemeClass` and pass it to your app:\n\n```python\nmy_theme = gr.Theme.from_hub(\"freddyaboulton/my_theme\")\n\nwith gr.Blocks(theme=my_theme) as demo:\n ....\n```\n\nYou can also pass the theme string directly to `Blocks` or `Interface` (`gr.Blocks(theme=\"freddyaboulton/my_theme\")`)\n\nYou can pin your app to an upstream theme version by using semantic versioning expressions.\n\nFor example, the following would ensure the theme we load from the `my_theme` repo was between versions `0.1.0` and `0.2.0`:\n\n```python\nwith gr.Blocks(theme=\"freddyaboulton/my_theme@>=0.1.0,<0.2.0\") as demo:\n ....\n```\n\nby [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3428](https://github.com/gradio-app/gradio/pull/3428)\n\n###### Code component \ud83e\uddbe\n\nNew code component allows you to enter, edit and display code with full syntax highlighting by [@pngwn](https://github.com/pngwn) in [PR 3421](https://github.com/gradio-app/gradio/pull/3421)\n\n###### The `Chatbot` component now supports audio, video, and images\n\nThe `Chatbot` component now supports audio, video, and images with a simple syntax: simply\npass in a tuple with the URL or filepath (the second optional element of the tuple is alt text), and the image/audio/video will be displayed:\n\n```python\ngr.Chatbot([\n ((\"driving.mp4\",), \"cool video\"),\n ((\"cantina.wav\",), \"cool audio\"),\n ((\"lion.jpg\", \"A lion\"), \"cool pic\"),\n]).style(height=800)\n```\n\n\"image\"\n\nNote: images were previously supported via Markdown syntax and that is still supported for backwards compatibility. By [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3413](https://github.com/gradio-app/gradio/pull/3413)\n\n- Allow consecutive function triggers with `.then` and `.success` by [@aliabid94](https://github.com/aliabid94) in [PR 3430](https://github.com/gradio-app/gradio/pull/3430)\n\n- New code component allows you to enter, edit and display code with full syntax highlighting by [@pngwn](https://github.com/pngwn) in [PR 3421](https://github.com/gradio-app/gradio/pull/3421)\n\n![](https://user-images.githubusercontent.com/12937446/224116643-5cfb94b3-93ce-43ee-bb7b-c25c3b66e0a1.png)\n\n- Added the `.select()` event listener, which also includes event data that can be passed as an argument to a function with type hint `gr.SelectData`. The following components support the `.select()` event listener: Chatbot, CheckboxGroup, Dataframe, Dropdown, File, Gallery, HighlightedText, Label, Radio, TabItem, Tab, Textbox. 
Example usage:\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n gallery = gr.Gallery([\"images/1.jpg\", \"images/2.jpg\", \"images/3.jpg\"])\n selected_index = gr.Textbox()\n\n def on_select(evt: gr.SelectData):\n return evt.index\n\n gallery.select(on_select, None, selected_index)\n```\n\nBy [@aliabid94](https://github.com/aliabid94) in [PR 3399](https://github.com/gradio-app/gradio/pull/3399)\n\n- The `Textbox` component now includes a copy button by [@abidlabs](https://github.com/abidlabs) in [PR 3452](https://github.com/gradio-app/gradio/pull/3452)\n\n### Bug Fixes:\n\n- Use `huggingface_hub` to send telemetry on `interface` and `blocks`; eventually to replace segment by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3342](https://github.com/gradio-app/gradio/pull/3342)\n- Ensure load events created by components (randomize for slider, callable values) are never queued unless every is passed by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3391](https://github.com/gradio-app/gradio/pull/3391)\n- Prevent in-place updates of `generic_update` by shallow copying by [@gitgithan](https://github.com/gitgithan) in [PR 3405](https://github.com/gradio-app/gradio/pull/3405) to fix [#3282](https://github.com/gradio-app/gradio/issues/3282)\n- Fix bug caused by not importing `BlockContext` in `utils.py` by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3424](https://github.com/gradio-app/gradio/pull/3424)\n- Ensure dropdown does not highlight partial matches by [@pngwn](https://github.com/pngwn) in [PR 3421](https://github.com/gradio-app/gradio/pull/3421)\n- Fix mic button display by [@aliabid94](https://github.com/aliabid94) in [PR 3456](https://github.com/gradio-app/gradio/pull/3456)\n\n### Documentation Changes:\n\n- Added a section on security and access when sharing Gradio apps by [@abidlabs](https://github.com/abidlabs) in [PR 3408](https://github.com/gradio-app/gradio/pull/3408)\n- Add Chinese README by [@uanu2002](https://github.com/uanu2002) in [PR 3394](https://github.com/gradio-app/gradio/pull/3394)\n- Adds documentation for web components by [@abidlabs](https://github.com/abidlabs) in [PR 3407](https://github.com/gradio-app/gradio/pull/3407)\n- Fixed link in Chinese readme by [@eltociear](https://github.com/eltociear) in [PR 3417](https://github.com/gradio-app/gradio/pull/3417)\n- Document Blocks methods by [@aliabd](https://github.com/aliabd) in [PR 3427](https://github.com/gradio-app/gradio/pull/3427)\n- Fixed bug where event handlers were not showing up in documentation by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3434](https://github.com/gradio-app/gradio/pull/3434)\n\n### Testing and Infrastructure Changes:\n\n- Fixes tests that were failing locally but passing on CI by [@abidlabs](https://github.com/abidlabs) in [PR 3411](https://github.com/gradio-app/gradio/pull/3411)\n- Remove codecov from the repo by [@aliabd](https://github.com/aliabd) in [PR 3415](https://github.com/gradio-app/gradio/pull/3415)\n\n#\n\n### Full Changelog:\n\n- Prevent in-place updates of `generic_update` by shallow copying by [@gitgithan](https://github.com/gitgithan) in [PR 3405](https://github.com/gradio-app/gradio/pull/3405) to fix [#3282](https://github.com/gradio-app/gradio/issues/3282)\n- Persist file names of files uploaded through any Gradio component by [@abidlabs](https://github.com/abidlabs) in [PR 3412](https://github.com/gradio-app/gradio/pull/3412)\n- Fix markdown embedded component in docs by 
[@aliabd](https://github.com/aliabd) in [PR 3410](https://github.com/gradio-app/gradio/pull/3410)\n- Clean up event listeners code by [@aliabid94](https://github.com/aliabid94) in [PR 3420](https://github.com/gradio-app/gradio/pull/3420)\n- Fix css issue with spaces logo by [@aliabd](https://github.com/aliabd) in [PR 3422](https://github.com/gradio-app/gradio/pull/3422)\n- Makes a few fixes to the `JSON` component (show_label parameter, icons) by [@abidlabs](https://github.com/abidlabs) in [PR 3451](https://github.com/gradio-app/gradio/pull/3451)\n\n#\n\n## 3.20.1\n\n### New Features:\n\n- Add `height` kwarg to style in `gr.Chatbot()` component by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3369](https://github.com/gradio-app/gradio/pull/3369)\n\n```python\nchatbot = gr.Chatbot().style(height=500)\n```\n\n### Bug Fixes:\n\n- Ensure uploaded images are always shown in the sketch tool by [@pngwn](https://github.com/pngwn) in [PR 3386](https://github.com/gradio-app/gradio/pull/3386)\n- Fixes bug so that when fn is a non-static class member, self is ignored as the first param of the fn by [@or25](https://github.com/or25) in [PR #3227](https://github.com/gradio-app/gradio/pull/3227)\n\n#\n\n#\n\n#\n\n#\n\n#\n\n## 3.20.0\n\n### New Features:\n\n###### Release event for Slider\n\nNow you can trigger your Python function to run when the slider is released, as opposed to on every change of the slider value!\n\nSimply use the `release` method on the slider:\n\n```python\nslider.release(function, inputs=[...], outputs=[...], api_name=\"predict\")\n```\n\nBy [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3353](https://github.com/gradio-app/gradio/pull/3353)\n\n###### Dropdown Component Updates\n\nThe standard dropdown component now supports searching for choices. 
Also when `multiselect` is `True`, you can specify `max_choices` to set the maximum number of choices you want the user to be able to select from the dropdown component.\n\n```python\ngr.Dropdown(label=\"Choose your favorite colors\", choices=[\"red\", \"blue\", \"green\", \"yellow\", \"orange\"], multiselect=True, max_choices=2)\n```\n\nby [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3211](https://github.com/gradio-app/gradio/pull/3211)\n\n###### Download button for images \ud83d\uddbc\ufe0f\n\nOutput images will now automatically have a download button displayed to make it easier to save and share\nthe results of Machine Learning art models.\n\n![download_sketch](https://user-images.githubusercontent.com/41651716/221025113-e693bf41-eabd-42b3-a4f2-26f2708d98fe.gif)\n\nBy [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3297](https://github.com/gradio-app/gradio/pull/3297)\n\n- Updated image upload component to accept all image formats, including lossless formats like .webp by [@fienestar](https://github.com/fienestar) in [PR 3225](https://github.com/gradio-app/gradio/pull/3225)\n- Adds a disabled mode to the `gr.Button` component by setting `interactive=False` by [@abidlabs](https://github.com/abidlabs) in [PR 3266](https://github.com/gradio-app/gradio/pull/3266) and [PR 3288](https://github.com/gradio-app/gradio/pull/3288)\n- Adds visual feedback to the when the Flag button is clicked, by [@abidlabs](https://github.com/abidlabs) in [PR 3289](https://github.com/gradio-app/gradio/pull/3289)\n- Adds ability to set `flagging_options` display text and saved flag separately by [@abidlabs](https://github.com/abidlabs) in [PR 3289](https://github.com/gradio-app/gradio/pull/3289)\n- Allow the setting of `brush_radius` for the `Image` component both as a default and via `Image.update()` by [@pngwn](https://github.com/pngwn) in [PR 3277](https://github.com/gradio-app/gradio/pull/3277)\n- Added `info=` argument to form components to enable extra context provided to users, by [@aliabid94](https://github.com/aliabid94) in [PR 3291](https://github.com/gradio-app/gradio/pull/3291)\n- Allow developers to access the username of a logged-in user from the `gr.Request()` object using the `.username` attribute by [@abidlabs](https://github.com/abidlabs) in [PR 3296](https://github.com/gradio-app/gradio/pull/3296)\n- Add `preview` option to `Gallery.style` that launches the gallery in preview mode when first loaded by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3345](https://github.com/gradio-app/gradio/pull/3345)\n\n### Bug Fixes:\n\n- Ensure `mirror_webcam` is always respected by [@pngwn](https://github.com/pngwn) in [PR 3245](https://github.com/gradio-app/gradio/pull/3245)\n- Fix issue where updated markdown links were not being opened in a new tab by [@gante](https://github.com/gante) in [PR 3236](https://github.com/gradio-app/gradio/pull/3236)\n- API Docs Fixes by [@aliabd](https://github.com/aliabd) in [PR 3287](https://github.com/gradio-app/gradio/pull/3287)\n- Added a timeout to queue messages as some demos were experiencing infinitely growing queues from active jobs waiting forever for clients to respond by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3196](https://github.com/gradio-app/gradio/pull/3196)\n- Fixes the height of rendered LaTeX images so that they match the height of surrounding text by [@abidlabs](https://github.com/abidlabs) in [PR 3258](https://github.com/gradio-app/gradio/pull/3258) and in [PR 
3276](https://github.com/gradio-app/gradio/pull/3276)\n- Fix bug where matplotlib images were always too small on the front end by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3274](https://github.com/gradio-app/gradio/pull/3274)\n- Remove embed's `initial_height` when loading is complete so the embed finds its natural height once it is loaded by [@pngwn](https://github.com/pngwn) in [PR 3292](https://github.com/gradio-app/gradio/pull/3292)\n- Prevent Sketch from crashing when a default image is provided by [@pngwn](https://github.com/pngwn) in [PR 3277](https://github.com/gradio-app/gradio/pull/3277)\n- Respect the `shape` argument on the front end when creating Image Sketches by [@pngwn](https://github.com/pngwn) in [PR 3277](https://github.com/gradio-app/gradio/pull/3277)\n- Fix infinite loop caused by setting `Dropdown`'s value to be `[]` and adding a change event on the dropdown by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3295](https://github.com/gradio-app/gradio/pull/3295)\n- Fix change event listed twice in image docs by [@aliabd](https://github.com/aliabd) in [PR 3318](https://github.com/gradio-app/gradio/pull/3318)\n- Fix bug that caused UI to be vertically centered at all times by [@pngwn](https://github.com/pngwn) in [PR 3336](https://github.com/gradio-app/gradio/pull/3336)\n- Fix bug where `height` set in `Gallery.style` was not respected by the front-end by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3343](https://github.com/gradio-app/gradio/pull/3343)\n- Ensure markdown lists are rendered correctly by [@pngwn](https://github.com/pngwn) in [PR 3341](https://github.com/gradio-app/gradio/pull/3341)\n- Ensure that the initial empty value for `gr.Dropdown(multiselect=True)` is an empty list and the initial value for `gr.Dropdown(multiselect=False)` is an empty string by [@pngwn](https://github.com/pngwn) in [PR 3338](https://github.com/gradio-app/gradio/pull/3338)\n- Ensure uploaded images respect the shape property when the canvas is also enabled by [@pngwn](https://github.com/pngwn) in [PR 3351](https://github.com/gradio-app/gradio/pull/3351)\n- Ensure that Google Analytics works correctly when gradio apps are created with `analytics_enabled=True` by [@abidlabs](https://github.com/abidlabs) in [PR 3349](https://github.com/gradio-app/gradio/pull/3349)\n- Fix bug where files were being re-uploaded after updates by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3375](https://github.com/gradio-app/gradio/pull/3375)\n- Fix error when using backend_fn and custom js at the same time by [@jialeicui](https://github.com/jialeicui) in [PR 3358](https://github.com/gradio-app/gradio/pull/3358)\n- Support new embeds for huggingface spaces subdomains by [@pngwn](https://github.com/pngwn) in [PR 3367](https://github.com/gradio-app/gradio/pull/3367)\n\n### Documentation Changes:\n\n- Added the `types` field to the dependency field in the config by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3315](https://github.com/gradio-app/gradio/pull/3315)\n- Gradio Status Page by [@aliabd](https://github.com/aliabd) in [PR 3331](https://github.com/gradio-app/gradio/pull/3331)\n- Adds a Guide on setting up a dashboard from Supabase data using the `gr.BarPlot`\n component by [@abidlabs](https://github.com/abidlabs) in [PR 3275](https://github.com/gradio-app/gradio/pull/3275)\n\n### Testing and Infrastructure Changes:\n\n- Adds a script to benchmark the performance of the queue and adds some instructions on how to use it. 
By [@freddyaboulton](https://github.com/freddyaboulton) and [@abidlabs](https://github.com/abidlabs) in [PR 3272](https://github.com/gradio-app/gradio/pull/3272)\n- Flaky python tests no longer cancel non-flaky tests by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3344](https://github.com/gradio-app/gradio/pull/3344)\n\n### Breaking Changes:\n\n- Chatbot bubble colors can no longer be set by `chatbot.style(color_map=)` by [@aliabid94] in [PR 3370](https://github.com/gradio-app/gradio/pull/3370)\n\n### Full Changelog:\n\n- Fixed comment typo in components.py by [@eltociear](https://github.com/eltociear) in [PR 3235](https://github.com/gradio-app/gradio/pull/3235)\n- Cleaned up chatbot ui look and feel by [@aliabid94] in [PR 3370](https://github.com/gradio-app/gradio/pull/3370)\n\n#\n\n## 3.19.1\n\n#\n\n### Bug Fixes:\n\n- UI fixes including footer and API docs by [@aliabid94](https://github.com/aliabid94) in [PR 3242](https://github.com/gradio-app/gradio/pull/3242)\n- Updated image upload component to accept all image formats, including lossless formats like .webp by [@fienestar](https://github.com/fienestar) in [PR 3225](https://github.com/gradio-app/gradio/pull/3225)\n\n#\n\n#\n\n#\n\n### Full Changelog:\n\n- Added backend support for themes by [@aliabid94](https://github.com/aliabid94) in [PR 2931](https://github.com/gradio-app/gradio/pull/2931)\n- Added support for button sizes \"lg\" (default) and \"sm\".\n\n#\n\n## 3.19.0\n\n### New Features:\n\n###### Improved embedding experience\n\nWhen embedding a spaces-hosted gradio app as a web component, you now get an improved UI linking back to the original space, better error handling and more intelligent load performance. No changes are required to your code to benefit from this enhanced experience; simply upgrade your gradio SDK to the latest version.\n\n![](https://user-images.githubusercontent.com/12937446/219653294-86937632-72c1-4e93-a77c-af705d49382a.png)\n\nThis behaviour is configurable. You can disable the info panel at the bottom by passing `info=\"false\"`. You can disable the container entirely by passing `container=\"false\"`.\n\nError statuses are reported in the UI with an easy way for end-users to report problems to the original space author via the community tab of that Hugginface space:\n\n![](https://user-images.githubusercontent.com/12937446/219655499-88019443-d694-44e7-9e6d-242e19d10a5c.png)\n\nBy default, gradio apps are lazy loaded, vastly improving performance when there are several demos on the page. Metadata is loaded ahead of time, but the space will only be loaded and rendered when it is in view.\n\nThis behaviour is configurable. You can pass `eager=\"true\"` to load and render the space regardless of whether or not it is currently on the screen.\n\nby [@pngwn](https://github.com/pngwn) in [PR 3205](https://github.com/gradio-app/gradio/pull/3205)\n\n###### New `gr.BarPlot` component! 
\ud83d\udcca\n\nCreate interactive bar plots from a high-level interface with `gr.BarPlot`.\nNo need to remember matplotlib syntax anymore!\n\nExample usage:\n\n```python\nimport gradio as gr\nimport pandas as pd\n\nsimple = pd.DataFrame({\n 'a': ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I'],\n 'b': [28, 55, 43, 91, 81, 53, 19, 87, 52]\n})\n\nwith gr.Blocks() as demo:\n gr.BarPlot(\n simple,\n x=\"a\",\n y=\"b\",\n title=\"Simple Bar Plot with made up data\",\n tooltip=['a', 'b'],\n )\n\ndemo.launch()\n```\n\nBy [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3157](https://github.com/gradio-app/gradio/pull/3157)\n\n###### Bokeh plots are back! \ud83c\udf20\n\nFixed a bug that prevented bokeh plots from being displayed on the front end and extended support for both 2.x and 3.x versions of bokeh!\n\n![image](https://user-images.githubusercontent.com/41651716/219468324-0d82e07f-8fb4-4ff9-b40c-8250b29e45f7.png)\n\nBy [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3212](https://github.com/gradio-app/gradio/pull/3212)\n\n### Bug Fixes:\n\n- Adds ability to add a single message from the bot or user side. Ex: specify `None` as the second value in the tuple to add a single message in the chatbot from the \"bot\" side.\n\n```python\ngr.Chatbot([(\"Hi, I'm DialoGPT. Try asking me a question.\", None)])\n```\n\nBy [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3165](https://github.com/gradio-app/gradio/pull/3165)\n\n- Fixes `gr.utils.delete_none` to only remove props whose values are `None` from the config by [@abidlabs](https://github.com/abidlabs) in [PR 3188](https://github.com/gradio-app/gradio/pull/3188)\n- Fix bug where embedded demos were not loading files properly by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3177](https://github.com/gradio-app/gradio/pull/3177)\n- The `change` event is now triggered when users click the 'Clear All' button of the multiselect Dropdown component by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3195](https://github.com/gradio-app/gradio/pull/3195)\n- Stops File component from freezing when a large file is uploaded by [@aliabid94](https://github.com/aliabid94) in [PR 3191](https://github.com/gradio-app/gradio/pull/3191)\n- Support Chinese pinyin in Dataframe by [@aliabid94](https://github.com/aliabid94) in [PR 3206](https://github.com/gradio-app/gradio/pull/3206)\n- The `clear` event is now triggered when images are cleared by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3218](https://github.com/gradio-app/gradio/pull/3218)\n- Fix bug where auth cookies were not sent when connecting to an app via HTTP by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3223](https://github.com/gradio-app/gradio/pull/3223)\n- Ensure LaTeX CSS is always applied in light and dark mode by [@pngwn](https://github.com/pngwn) in [PR 3233](https://github.com/gradio-app/gradio/pull/3233)\n\n### Documentation Changes:\n\n- Sort components in docs in alphabetical order by [@aliabd](https://github.com/aliabd) in [PR 3152](https://github.com/gradio-app/gradio/pull/3152)\n- Changes to W&B guide by [@scottire](https://github.com/scottire) in [PR 3153](https://github.com/gradio-app/gradio/pull/3153)\n- Keep pnginfo metadata for gallery by [@wfng92](https://github.com/wfng92) in [PR 3150](https://github.com/gradio-app/gradio/pull/3150)\n- Add a section on how to run a Gradio app locally by [@osanseviero](https://github.com/osanseviero) in [PR 3170](https://github.com/gradio-app/gradio/pull/3170)\n- Fixed typos 
in gradio events function documentation by [@vidalmaxime](https://github.com/vidalmaxime) in [PR 3168](https://github.com/gradio-app/gradio/pull/3168)\n- Added an example using Gradio's batch mode with the diffusers library by [@abidlabs](https://github.com/abidlabs) in [PR 3224](https://github.com/gradio-app/gradio/pull/3224)\n\n#\n\n#\n\n### Full Changelog:\n\n- Fix demos page css and add close demos button by [@aliabd](https://github.com/aliabd) in [PR 3151](https://github.com/gradio-app/gradio/pull/3151)\n- Caches temp files from base64 input data by giving them a deterministic path based on the contents of the data by [@abidlabs](https://github.com/abidlabs) in [PR 3197](https://github.com/gradio-app/gradio/pull/3197)\n- Better warnings (when there is a mismatch between the number of output components and values returned by a function, or when the `File` component or `UploadButton` component includes a `file_types` parameter along with `file_count==\"dir\"`) by [@abidlabs](https://github.com/abidlabs) in [PR 3194](https://github.com/gradio-app/gradio/pull/3194)\n- Raises a `gr.Error` instead of a regular Python error when you use `gr.Interface.load()` to load a model and there's an error querying the HF API by [@abidlabs](https://github.com/abidlabs) in [PR 3194](https://github.com/gradio-app/gradio/pull/3194)\n- Fixed gradio share links so that they are persistent and do not reset if the network\n connection is disrupted by [XciD](https://github.com/XciD), [Wauplin](https://github.com/Wauplin), and [@abidlabs](https://github.com/abidlabs) in [PR 3149](https://github.com/gradio-app/gradio/pull/3149) and a follow-up to allow it to work for users upgrading from a previous Gradio version in [PR 3221](https://github.com/gradio-app/gradio/pull/3221)\n\n#\n\n## 3.18.0\n\n### New Features:\n\n###### Revamped Stop Button for Interfaces \ud83d\uded1\n\nIf your Interface function is a generator, there used to be a separate `Stop` button displayed next\nto the `Submit` button.\n\nWe've revamped the `Submit` button so that it turns into a `Stop` button during the generation process.\nClicking on the `Stop` button will cancel the generation and turn it back into a `Submit` button.\nThe `Stop` button will automatically turn back into a `Submit` button at the end of the generation if you don't use it!\n\nBy [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3124](https://github.com/gradio-app/gradio/pull/3124)\n\n###### Queue now works with reload mode!\n\nYou can now call `queue` on your `demo` outside of the `if __name__ == \"__main__\"` block and\nrun the script in reload mode with the `gradio` command.\n\nAny changes to the `app.py` file will be reflected in the webpage automatically and the queue will work\nproperly!\n\nBy [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3089](https://github.com/gradio-app/gradio/pull/3089)\n\n###### Allow serving files from additional directories\n\n```python\ndemo = gr.Interface(...)\ndemo.launch(\n file_directories=[\"/var/lib/demo/path/to/resources\"]\n)\n```\n\nBy [@maxaudron](https://github.com/maxaudron) in [PR 3075](https://github.com/gradio-app/gradio/pull/3075)\n\n### Bug Fixes:\n\n- Fixes URL resolution on Windows by [@abidlabs](https://github.com/abidlabs) in [PR 3108](https://github.com/gradio-app/gradio/pull/3108)\n- Example caching now works with components without a label attribute (e.g. 
`Column`) by [@abidlabs](https://github.com/abidlabs) in [PR 3123](https://github.com/gradio-app/gradio/pull/3123)\n- Ensure the Video component correctly resets the UI state when a new video source is loaded and reduce choppiness of the UI by [@pngwn](https://github.com/pngwn) in [PR 3117](https://github.com/gradio-app/gradio/pull/3117)\n- Fixes loading private Spaces by [@abidlabs](https://github.com/abidlabs) in [PR 3068](https://github.com/gradio-app/gradio/pull/3068)\n- Added a warning when attempting to launch an `Interface` via the `%%blocks` jupyter notebook magic command by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3126](https://github.com/gradio-app/gradio/pull/3126)\n- Fixes bug where interactive output image cannot be set when in edit mode by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3135](https://github.com/gradio-app/gradio/pull/3135)\n- A share link will automatically be created when running on SageMaker notebooks so that the front-end is properly displayed by [@abidlabs](https://github.com/abidlabs) in [PR 3137](https://github.com/gradio-app/gradio/pull/3137)\n- Fixes a few dropdown component issues; the checkmark next to options is hidden as expected, and keyboard hover is visible by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3145](https://github.com/gradio-app/gradio/pull/3145)\n- Fixed bug where example pagination buttons were not visible in dark mode or displayed under the examples table. By [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3144](https://github.com/gradio-app/gradio/pull/3144)\n- Fixed bug where the font color of axis labels and titles for native plots did not respond to dark mode preferences. By [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3146](https://github.com/gradio-app/gradio/pull/3146)\n\n### Documentation Changes:\n\n- Added a guide on the 4 kinds of Gradio Interfaces by [@yvrjsharma](https://github.com/yvrjsharma) and [@abidlabs](https://github.com/abidlabs) in [PR 3003](https://github.com/gradio-app/gradio/pull/3003)\n- Explained that the parameters in `launch` will not be respected when using reload mode, e.g. the `gradio` command by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3089](https://github.com/gradio-app/gradio/pull/3089)\n- Added a demo to show how to set up variable numbers of outputs in Gradio by [@abidlabs](https://github.com/abidlabs) in [PR 3127](https://github.com/gradio-app/gradio/pull/3127)\n- Updated docs to reflect that the `equal_height` parameter should be passed to the `.style()` method of `gr.Row()` by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3125](https://github.com/gradio-app/gradio/pull/3125)\n\n#\n\n#\n\n### Full Changelog:\n\n- Changed URL of final image for `fake_diffusion` demos by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3120](https://github.com/gradio-app/gradio/pull/3120)\n\n#\n\n## 3.17.1\n\n### New Features:\n\n###### iOS image rotation fixed \ud83d\udd04\n\nPreviously photos uploaded via iOS would be rotated after processing. 
This has been fixed by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3089](https://github.com/gradio-app/gradio/pull/3091)\n\n###### Before\n\n![image](https://user-images.githubusercontent.com/41651716/215846507-a36e9d05-1ac2-4867-8ab3-ce045a9415d9.png)\n\n###### After\n\n![image](https://user-images.githubusercontent.com/41651716/215846554-e41773ed-70f0-491a-9952-6a18babf91ef.png)\n\n###### Run on Kaggle kernels \ud83e\uddea\n\nA share link will automatically be created when running on Kaggle kernels (notebooks) so that the front-end is properly displayed.\n\n![image](https://user-images.githubusercontent.com/41651716/216104254-2cf55599-449c-436c-b57e-40f6a83f9eee.png)\n\nBy [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3101](https://github.com/gradio-app/gradio/pull/3101)\n\n### Bug Fixes:\n\n- Fix bug where examples were not rendered correctly for demos created with the Blocks API that had multiple input components by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3090](https://github.com/gradio-app/gradio/pull/3090)\n- Fix change event listener for JSON, HighlightedText, Chatbot by [@aliabid94](https://github.com/aliabid94) in [PR 3095](https://github.com/gradio-app/gradio/pull/3095)\n- Fixes bug where video and file change events were not working by [@tomchang25](https://github.com/tomchang25) in [PR 3098](https://github.com/gradio-app/gradio/pull/3098)\n- Fixes bug where static_video play and pause events were not working by [@tomchang25](https://github.com/tomchang25) in [PR 3098](https://github.com/gradio-app/gradio/pull/3098)\n- Fixed `Gallery.style(grid=...)` by [@aliabd](https://github.com/aliabd) in [PR 3107](https://github.com/gradio-app/gradio/pull/3107)\n\n### Documentation Changes:\n\n- Update chatbot guide to include blocks demo and markdown support section by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3023](https://github.com/gradio-app/gradio/pull/3023)\n\n* Fix a broken link in the Quick Start guide, by [@cakiki](https://github.com/cakiki) in [PR 3109](https://github.com/gradio-app/gradio/pull/3109)\n* Better docs navigation on mobile by [@aliabd](https://github.com/aliabd) in [PR 3112](https://github.com/gradio-app/gradio/pull/3112)\n* Add a guide on using Gradio with [Comet](https://comet.com/), by [@DN6](https://github.com/DN6/) in [PR 3058](https://github.com/gradio-app/gradio/pull/3058)\n\n#\n\n#\n\n### Full Changelog:\n\n- Set minimum `markdown-it-py` version to `2.0.0` so that the dollar math plugin is compatible by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3102](https://github.com/gradio-app/gradio/pull/3102)\n\n#\n\n## 3.17.0\n\n### New Features:\n\n###### Extended support for Interface.load! \ud83c\udfd7\ufe0f\n\nYou can now load `image-to-text` and `conversational` pipelines from the hub!\n\n###### Image-to-text Demo\n\n```python\nio = gr.Interface.load(\"models/nlpconnect/vit-gpt2-image-captioning\",\n api_key=\"\")\nio.launch()\n```\n\n\"image\"\n\n###### conversational Demo\n\n```python\nchatbot = gr.Interface.load(\"models/microsoft/DialoGPT-medium\",\n api_key=\"\")\nchatbot.launch()\n```\n\n![chatbot_load](https://user-images.githubusercontent.com/41651716/213260220-3eaa25b7-a38b-48c6-adeb-2718bdf297a2.gif)\n\nBy [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3011](https://github.com/gradio-app/gradio/pull/3011)\n\n###### Download Button added to Model3D Output Component \ud83d\udce5\n\nNo need for an additional file output component to enable model3d file downloads anymore. 
We now added a download button to the model3d component itself.\n\n\"Screenshot\n\nBy [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3014](https://github.com/gradio-app/gradio/pull/3014)\n\n###### Fixing Auth on Spaces \ud83d\udd11\n\nAuthentication on Spaces works now! Third party cookies must be enabled in your browser to be able\nto log in. Some browsers disable third party cookies by default (Safari, Chrome Incognito).\n\n![auth_spaces](https://user-images.githubusercontent.com/41651716/215528417-09538933-0576-4d1d-b3b9-1e877ab01905.gif)\n\n### Bug Fixes:\n\n- Fixes bug where interpretation event was not configured correctly by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2993](https://github.com/gradio-app/gradio/pull/2993)\n- Fix relative import bug in reload mode by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2992](https://github.com/gradio-app/gradio/pull/2992)\n- Fixes bug where png files were not being recognized when uploading images by [@abidlabs](https://github.com/abidlabs) in [PR 3002](https://github.com/gradio-app/gradio/pull/3002)\n- Fixes bug where external Spaces could not be loaded and used as functions if they returned files by [@abidlabs](https://github.com/abidlabs) in [PR 3004](https://github.com/gradio-app/gradio/pull/3004)\n- Fix bug where file serialization output was not JSON serializable by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2999](https://github.com/gradio-app/gradio/pull/2999)\n- Fixes bug where temporary uploaded files were not being added to temp sets by [@abidlabs](https://github.com/abidlabs) in [PR 3005](https://github.com/gradio-app/gradio/pull/3005)\n- Fixes issue where markdown support in chatbot breaks older demos by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3006](https://github.com/gradio-app/gradio/pull/3006)\n- Fixes the `/file/` route that was broken in a recent change in [PR 3010](https://github.com/gradio-app/gradio/pull/3010)\n- Fix bug where the Image component could not serialize image URLs by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2957](https://github.com/gradio-app/gradio/pull/2957)\n- Fix forwarding for guides after SEO renaming by [@aliabd](https://github.com/aliabd) in [PR 3017](https://github.com/gradio-app/gradio/pull/3017)\n- Switch all pages on the website to use latest stable gradio by [@aliabd](https://github.com/aliabd) in [PR 3016](https://github.com/gradio-app/gradio/pull/3016)\n- Fix bug related to deprecated parameters in `huggingface_hub` for the HuggingFaceDatasetSaver in [PR 3025](https://github.com/gradio-app/gradio/pull/3025)\n- Added better support for symlinks in the way absolute paths are resolved by [@abidlabs](https://github.com/abidlabs) in [PR 3037](https://github.com/gradio-app/gradio/pull/3037)\n- Fix several minor frontend bugs (loading animation, examples as gallery) in the frontend by [@aliabid94](https://github.com/aliabid94) in [PR 2961](https://github.com/gradio-app/gradio/pull/3026).\n- Fixes bug that the chatbot sample code does not work with certain input values by [@petrov826](https://github.com/petrov826) in [PR 3039](https://github.com/gradio-app/gradio/pull/3039).\n- Fix shadows for form elements and ensure focus styles are more visible in dark mode by [@pngwn](https://github.com/pngwn) in [PR 3042](https://github.com/gradio-app/gradio/pull/3042).\n- 
Fixed bug where the Checkbox and Dropdown change events were not triggered in response to other component changes by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3045](https://github.com/gradio-app/gradio/pull/3045)\n- Fix bug where the queue was not properly restarted after launching a `closed` app by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3022](https://github.com/gradio-app/gradio/pull/3022)\n- Adding missing embedded components on docs by [@aliabd](https://github.com/aliabd) in [PR 3027](https://github.com/gradio-app/gradio/pull/3027)\n- Fixes bug where app would crash if the `file_types` parameter of `gr.File` or `gr.UploadButton` was not a list by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3048](https://github.com/gradio-app/gradio/pull/3048)\n- Ensure CSS mounts correctly regardless of how many Gradio instances are on the page [@pngwn](https://github.com/pngwn) in [PR 3059](https://github.com/gradio-app/gradio/pull/3059).\n- Fix bug where input component was not hidden in the frontend for `UploadButton` by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3053](https://github.com/gradio-app/gradio/pull/3053)\n- Fixes issue where after clicking submit or undo, the sketch output wouldn't clear. [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3047](https://github.com/gradio-app/gradio/pull/3047)\n- Ensure spaces embedded via the web component always use the correct URLs for server requests and change ports for testing to avoid strange collisions when users are working with embedded apps locally by [@pngwn](https://github.com/pngwn) in [PR 3065](https://github.com/gradio-app/gradio/pull/3065)\n- Preserve selected image of Gallery through updated by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3061](https://github.com/gradio-app/gradio/pull/3061)\n- Fix bug where auth was not respected on HF spaces by [@freddyaboulton](https://github.com/freddyaboulton) and [@aliabid94](https://github.com/aliabid94) in [PR 3049](https://github.com/gradio-app/gradio/pull/3049)\n- Fixes bug where tabs selected attribute not working if manually change tab by [@tomchang25](https://github.com/tomchang25) in [3055](https://github.com/gradio-app/gradio/pull/3055)\n- Change chatbot to show dots on progress, and fix bug where chatbot would not stick to bottom in the case of images by [@aliabid94](https://github.com/aliabid94) in [PR 3067](https://github.com/gradio-app/gradio/pull/3079)\n\n### Documentation Changes:\n\n- SEO improvements to guides by[@aliabd](https://github.com/aliabd) in [PR 2915](https://github.com/gradio-app/gradio/pull/2915)\n- Use `gr.LinePlot` for the `blocks_kinematics` demo by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2998](https://github.com/gradio-app/gradio/pull/2998)\n- Updated the `interface_series_load` to include some inline markdown code by [@abidlabs](https://github.com/abidlabs) in [PR 3051](https://github.com/gradio-app/gradio/pull/3051)\n\n### Testing and Infrastructure Changes:\n\n- Adds a GitHub action to test if any large files (> 5MB) are present by [@abidlabs](https://github.com/abidlabs) in [PR 3013](https://github.com/gradio-app/gradio/pull/3013)\n\n#\n\n### Full Changelog:\n\n- Rewrote frontend using CSS variables for themes by [@pngwn](https://github.com/pngwn) in [PR 2840](https://github.com/gradio-app/gradio/pull/2840)\n- Moved telemetry requests to run on background threads by [@abidlabs](https://github.com/abidlabs) in [PR 
3054](https://github.com/gradio-app/gradio/pull/3054)\n\n#\n\n## 3.16.2\n\n#\n\n### Bug Fixes:\n\n- Fixed file upload failures for files with zero size by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2923](https://github.com/gradio-app/gradio/pull/2923)\n- Fixed bug where `mount_gradio_app` would not launch if the queue was enabled in a gradio app by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2939](https://github.com/gradio-app/gradio/pull/2939)\n- Fix custom long CSS handling in Blocks by [@anton-l](https://github.com/anton-l) in [PR 2953](https://github.com/gradio-app/gradio/pull/2953)\n- Recovers the dropdown change event by [@abidlabs](https://github.com/abidlabs) in [PR 2954](https://github.com/gradio-app/gradio/pull/2954).\n- Fix audio file output by [@aliabid94](https://github.com/aliabid94) in [PR 2961](https://github.com/gradio-app/gradio/pull/2961).\n- Fixed bug where file extensions of really long files were not kept after download by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2929](https://github.com/gradio-app/gradio/pull/2929)\n- Fix bug where outputs for examples were not being returned by the backend by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2955](https://github.com/gradio-app/gradio/pull/2955)\n- Fix bug in `blocks_plug` demo that prevented switching tabs programmatically with Python by [@TashaSkyUp](https://github.com/TashaSkyUp) in [PR 2971](https://github.com/gradio-app/gradio/pull/2971).\n\n#\n\n#\n\n#\n\n#\n\n#\n\n## 3.16.1\n\n#\n\n### Bug Fixes:\n\n- Fix audio file output by [@aliabid94](https://github.com/aliabid94) in [PR 2950](https://github.com/gradio-app/gradio/pull/2950).\n\n#\n\n#\n\n#\n\n#\n\n#\n\n## 3.16.0\n\n### New Features:\n\n###### Send custom progress updates by adding a `gr.Progress` argument after the input arguments to any function. 
Example:\n\n```python\ndef reverse(word, progress=gr.Progress()):\n progress(0, desc=\"Starting\")\n time.sleep(1)\n new_string = \"\"\n for letter in progress.tqdm(word, desc=\"Reversing\"):\n time.sleep(0.25)\n new_string = letter + new_string\n return new_string\n\ndemo = gr.Interface(reverse, gr.Text(), gr.Text())\n```\n\nProgress indicator bar by [@aliabid94](https://github.com/aliabid94) in [PR 2750](https://github.com/gradio-app/gradio/pull/2750).\n\n- Added `title` argument to `TabbedInterface` by @MohamedAliRashad in [#2888](https://github.com/gradio-app/gradio/pull/2888)\n- Add support for specifying file extensions for `gr.File` and `gr.UploadButton`, using `file_types` parameter (e.g `gr.File(file_count=\"multiple\", file_types=[\"text\", \".json\", \".csv\"])`) by @dawoodkhan82 in [#2901](https://github.com/gradio-app/gradio/pull/2901)\n- Added `multiselect` option to `Dropdown` by @dawoodkhan82 in [#2871](https://github.com/gradio-app/gradio/pull/2871)\n\n###### With `multiselect` set to `true` a user can now select multiple options from the `gr.Dropdown` component.\n\n```python\ngr.Dropdown([\"angola\", \"pakistan\", \"canada\"], multiselect=True, value=[\"angola\"])\n```\n\n\"Screenshot\n\n### Bug Fixes:\n\n- Fixed bug where an error opening an audio file led to a crash by [@FelixDombek](https://github.com/FelixDombek) in [PR 2898](https://github.com/gradio-app/gradio/pull/2898)\n- Fixed bug where setting `default_enabled=False` made it so that the entire queue did not start by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2876](https://github.com/gradio-app/gradio/pull/2876)\n- Fixed bug where csv preview for DataFrame examples would show filename instead of file contents by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2877](https://github.com/gradio-app/gradio/pull/2877)\n- Fixed bug where an error raised after yielding iterative output would not be displayed in the browser by\n [@JaySmithWpg](https://github.com/JaySmithWpg) in [PR 2889](https://github.com/gradio-app/gradio/pull/2889)\n- Fixed bug in `blocks_style` demo that was preventing it from launching by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2890](https://github.com/gradio-app/gradio/pull/2890)\n- Fixed bug where files could not be downloaded by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2926](https://github.com/gradio-app/gradio/pull/2926)\n- Fixed bug where cached examples were not displaying properly by [@a-rogalska](https://github.com/a-rogalska) in [PR 2974](https://github.com/gradio-app/gradio/pull/2974)\n\n### Documentation Changes:\n\n- Added a Guide on using Google Sheets to create a real-time dashboard with Gradio's `DataFrame` and `LinePlot` component, by [@abidlabs](https://github.com/abidlabs) in [PR 2816](https://github.com/gradio-app/gradio/pull/2816)\n- Add a components - events matrix on the docs by [@aliabd](https://github.com/aliabd) in [PR 2921](https://github.com/gradio-app/gradio/pull/2921)\n\n### Testing and Infrastructure Changes:\n\n- Deployed PRs from forks to spaces by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2895](https://github.com/gradio-app/gradio/pull/2895)\n\n#\n\n### Full Changelog:\n\n- The `default_enabled` parameter of the `Blocks.queue` method has no effect by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2876](https://github.com/gradio-app/gradio/pull/2876)\n- Added typing to several Python files in codebase by [@abidlabs](https://github.com/abidlabs) in [PR 
2887](https://github.com/gradio-app/gradio/pull/2887)\n- Excluding untracked files from demo notebook check action by [@aliabd](https://github.com/aliabd) in [PR 2897](https://github.com/gradio-app/gradio/pull/2897)\n- Optimize images and gifs by [@aliabd](https://github.com/aliabd) in [PR 2922](https://github.com/gradio-app/gradio/pull/2922)\n- Updated typing by [@1nF0rmed](https://github.com/1nF0rmed) in [PR 2904](https://github.com/gradio-app/gradio/pull/2904)\n\n### Contributors Shoutout:\n\n- @JaySmithWpg for making their first contribution to gradio!\n- @MohamedAliRashad for making their first contribution to gradio!\n\n## 3.15.0\n\n### New Features:\n\nGradio's newest plotting component `gr.LinePlot`! \ud83d\udcc8\n\nWith this component you can easily create time series visualizations with customizable\nappearance for your demos and dashboards ... all without having to know an external plotting library.\n\nFor an example of the api see below:\n\n```python\ngr.LinePlot(stocks,\n x=\"date\",\n y=\"price\",\n color=\"symbol\",\n color_legend_position=\"bottom\",\n width=600, height=400, title=\"Stock Prices\")\n```\n\n![image](https://user-images.githubusercontent.com/41651716/208711646-81ae3745-149b-46a3-babd-0569aecdd409.png)\n\nBy [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2807](https://github.com/gradio-app/gradio/pull/2807)\n\n### Bug Fixes:\n\n- Fixed bug where the `examples_per_page` parameter of the `Examples` component was not passed to the internal `Dataset` component by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2861](https://github.com/gradio-app/gradio/pull/2861)\n- Fixes loading Spaces that have components with default values by [@abidlabs](https://github.com/abidlabs) in [PR 2855](https://github.com/gradio-app/gradio/pull/2855)\n- Fixes flagging when `allow_flagging=\"auto\"` in `gr.Interface()` by [@abidlabs](https://github.com/abidlabs) in [PR 2695](https://github.com/gradio-app/gradio/pull/2695)\n- Fixed bug where passing a non-list value to `gr.CheckboxGroup` would crash the entire app by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2866](https://github.com/gradio-app/gradio/pull/2866)\n\n### Documentation Changes:\n\n- Added a Guide on using BigQuery with Gradio's `DataFrame` and `ScatterPlot` component,\n by [@abidlabs](https://github.com/abidlabs) in [PR 2794](https://github.com/gradio-app/gradio/pull/2794)\n\n#\n\n#\n\n### Full Changelog:\n\n- Fixed importing gradio can cause PIL.Image.registered_extensions() to break by `[@aliencaocao](https://github.com/aliencaocao)` in `[PR 2846](https://github.com/gradio-app/gradio/pull/2846)`\n- Fix css glitch and navigation in docs by [@aliabd](https://github.com/aliabd) in [PR 2856](https://github.com/gradio-app/gradio/pull/2856)\n- Added the ability to set `x_lim`, `y_lim` and legend positions for `gr.ScatterPlot` by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2807](https://github.com/gradio-app/gradio/pull/2807)\n- Remove footers and min-height the correct way by [@aliabd](https://github.com/aliabd) in [PR 2860](https://github.com/gradio-app/gradio/pull/2860)\n\n#\n\n## 3.14.0\n\n### New Features:\n\n###### Add Waveform Visual Support to Audio\n\nAdds a `gr.make_waveform()` function that creates a waveform video by combining an audio and an optional background image by [@dawoodkhan82](http://github.com/dawoodkhan82) and [@aliabid94](http://github.com/aliabid94) in [PR 2706](https://github.com/gradio-app/gradio/pull/2706. 
Helpful for making audio outputs much more shareable.\n\n![waveform screenrecording](https://user-images.githubusercontent.com/7870876/206062396-164a5e71-451a-4fe0-94a7-cbe9269d57e6.gif)\n\n###### Allows Every Component to Accept an `every` Parameter\n\nWhen a component's initial value is a function, the `every` parameter re-runs the function every `every` seconds. By [@abidlabs](https://github.com/abidlabs) in [PR 2806](https://github.com/gradio-app/gradio/pull/2806). Here's a code example:\n\n```py\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n df = gr.DataFrame(run_query, every=60*60)\n\ndemo.queue().launch()\n```\n\n### Bug Fixes:\n\n- Fixed issue where too many temporary files were created, all with randomly generated\n filepaths. Now fewer temporary files are created and are assigned a path that is a\n hash based on the file contents by [@abidlabs](https://github.com/abidlabs) in [PR 2758](https://github.com/gradio-app/gradio/pull/2758)\n\n#\n\n#\n\n#\n\n#\n\n#\n\n## 3.13.2\n\n#\n\n### Bug Fixes:\n\n\\*No changes to highlight.\n\n-\n\n### Documentation Changes:\n\n- Improves documentation of several queuing-related parameters by [@abidlabs](https://github.com/abidlabs) in [PR 2825](https://github.com/gradio-app/gradio/pull/2825)\n\n### Testing and Infrastructure Changes:\n\n- Remove h11 pinning by [@ecederstrand](https://github.com/ecederstrand) in [PR 2820](https://github.com/gradio-app/gradio/pull/2820)\n\n#\n\n#\n\n#\n\n## 3.13.1\n\n### New Features:\n\n###### New Shareable Links\n\nReplaces tunneling logic based on ssh port-forwarding to that based on `frp` by [XciD](https://github.com/XciD) and [Wauplin](https://github.com/Wauplin) in [PR 2509](https://github.com/gradio-app/gradio/pull/2509)\n\nYou don't need to do anything differently, but when you set `share=True` in `launch()`,\nyou'll get this message and a public link that look a little bit different:\n\n```bash\nSetting up a public link... we have recently upgraded the way public links are generated. 
If you encounter any problems, please downgrade to gradio version 3.13.0\n.\nRunning on public URL: https://bec81a83-5b5c-471e.gradio.live\n```\n\nThese links are a more secure and scalable way to create shareable demos!\n\n### Bug Fixes:\n\n- Allows `gr.Dataframe()` to take a `pandas.DataFrame` that includes numpy array and other types as its initial value, by [@abidlabs](https://github.com/abidlabs) in [PR 2804](https://github.com/gradio-app/gradio/pull/2804)\n- Add `altair` to requirements.txt by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2811](https://github.com/gradio-app/gradio/pull/2811)\n- Added aria-labels to icon buttons that are built into UI components by [@emilyuhde](http://github.com/emilyuhde) in [PR 2791](https://github.com/gradio-app/gradio/pull/2791)\n\n### Documentation Changes:\n\n- Fixed some typos in the \"Plot Component for Maps\" guide by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2811](https://github.com/gradio-app/gradio/pull/2811)\n\n### Testing and Infrastructure Changes:\n\n- Fixed test for IP address by [@abidlabs](https://github.com/abidlabs) in [PR 2808](https://github.com/gradio-app/gradio/pull/2808)\n\n#\n\n### Full Changelog:\n\n- Fixed typo in parameter `visible` in classes in `templates.py` by [@abidlabs](https://github.com/abidlabs) in [PR 2805](https://github.com/gradio-app/gradio/pull/2805)\n- Switched external service for getting IP address from `https://api.ipify.org` to `https://checkip.amazonaws.com/` by [@abidlabs](https://github.com/abidlabs) in [PR 2810](https://github.com/gradio-app/gradio/pull/2810)\n\n#\n\n- Fixed typo in parameter `visible` in classes in `templates.py` by [@abidlabs](https://github.com/abidlabs) in [PR 2805](https://github.com/gradio-app/gradio/pull/2805)\n- Switched external service for getting IP address from `https://api.ipify.org` to `https://checkip.amazonaws.com/` by [@abidlabs](https://github.com/abidlabs) in [PR 2810](https://github.com/gradio-app/gradio/pull/2810)\n\n## 3.13.0\n\n### New Features:\n\n###### Scatter plot component\n\nIt is now possible to create a scatter plot natively in Gradio!\n\nThe `gr.ScatterPlot` component accepts a pandas dataframe and some optional configuration parameters\nand will automatically create a plot for you!\n\nThis is the first of many native plotting components in Gradio!\n\nFor an example of how to use `gr.ScatterPlot` see below:\n\n```python\nimport gradio as gr\nfrom vega_datasets import data\n\ncars = data.cars()\n\nwith gr.Blocks() as demo:\n gr.ScatterPlot(show_label=False,\n value=cars,\n x=\"Horsepower\",\n y=\"Miles_per_Gallon\",\n color=\"Origin\",\n tooltip=\"Name\",\n title=\"Car Data\",\n y_title=\"Miles per Gallon\",\n color_legend_title=\"Origin of Car\").style(container=False)\n\ndemo.launch()\n```\n\n\"image\"\n\nBy [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2764](https://github.com/gradio-app/gradio/pull/2764)\n\n###### Support for altair plots\n\nThe `Plot` component can now accept altair plots as values!\nSimply return an altair plot from your event listener and gradio will display it in the front-end.\nSee the example below:\n\n```python\nimport gradio as gr\nimport altair as alt\nfrom vega_datasets import data\n\ncars = data.cars()\nchart = (\n alt.Chart(cars)\n .mark_point()\n .encode(\n x=\"Horsepower\",\n y=\"Miles_per_Gallon\",\n color=\"Origin\",\n )\n)\n\nwith gr.Blocks() as demo:\n gr.Plot(value=chart)\ndemo.launch()\n```\n\n\"image\"\n\nBy [@freddyaboulton](https://github.com/freddyaboulton) in 
[PR 2741](https://github.com/gradio-app/gradio/pull/2741)\n\n###### Set the background color of a Label component\n\nThe `Label` component now accepts a `color` argument by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2736](https://github.com/gradio-app/gradio/pull/2736).\nThe `color` argument should either be a valid css color name or hexadecimal string.\nYou can update the color with `gr.Label.update`!\n\nThis lets you create Alert and Warning boxes with the `Label` component. See below:\n\n```python\nimport gradio as gr\nimport random\n\ndef update_color(value):\n if value < 0:\n # This is bad so use red\n return \"#FF0000\"\n elif 0 <= value <= 20:\n # Ok but pay attention (use orange)\n return \"#ff9966\"\n else:\n # Nothing to worry about\n return None\n\ndef update_value():\n choice = random.choice(['good', 'bad', 'so-so'])\n color = update_color(choice)\n return gr.Label.update(value=choice, color=color)\n\n\nwith gr.Blocks() as demo:\n label = gr.Label(value=-10)\n demo.load(lambda: update_value(), inputs=None, outputs=[label], every=1)\ndemo.queue().launch()\n```\n\n![label_bg_color_update](https://user-images.githubusercontent.com/41651716/204400372-80e53857-f26f-4a38-a1ae-1acadff75e89.gif)\n\n###### Add Brazilian Portuguese translation\n\nAdd Brazilian Portuguese translation (pt-BR.json) by [@pstwh](http://github.com/pstwh) in [PR 2753](https://github.com/gradio-app/gradio/pull/2753):\n\n\"image\"\n\n### Bug Fixes:\n\n- Fixed issue where image thumbnails were not showing when an example directory was provided\n by [@abidlabs](https://github.com/abidlabs) in [PR 2745](https://github.com/gradio-app/gradio/pull/2745)\n- Fixed bug loading audio input models from the hub by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2779](https://github.com/gradio-app/gradio/pull/2779).\n- Fixed issue where entities were not merged when highlighted text was generated from the\n dictionary inputs [@payoto](https://github.com/payoto) in [PR 2767](https://github.com/gradio-app/gradio/pull/2767)\n- Fixed bug where generating events did not finish running even if the websocket connection was closed by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2783](https://github.com/gradio-app/gradio/pull/2783).\n\n#\n\n#\n\n#\n\n### Full Changelog:\n\n- Images in the chatbot component are now resized if they exceed a max width by [@abidlabs](https://github.com/abidlabs) in [PR 2748](https://github.com/gradio-app/gradio/pull/2748)\n- Missing parameters have been added to `gr.Blocks().load()` by [@abidlabs](https://github.com/abidlabs) in [PR 2755](https://github.com/gradio-app/gradio/pull/2755)\n- Deindex share URLs from search by [@aliabd](https://github.com/aliabd) in [PR 2772](https://github.com/gradio-app/gradio/pull/2772)\n- Redirect old links and fix broken ones by [@aliabd](https://github.com/aliabd) in [PR 2774](https://github.com/gradio-app/gradio/pull/2774)\n\n#\n\n## 3.12.0\n\n### New Features:\n\n###### The `Chatbot` component now supports a subset of Markdown (including bold, italics, code, images)\n\nYou can now pass in some Markdown to the Chatbot component and it will show up,\nmeaning that you can pass in images as well! 
by [@abidlabs](https://github.com/abidlabs) in [PR 2731](https://github.com/gradio-app/gradio/pull/2731)\n\nHere's a simple example that references a local image `lion.jpg` that is in the same\nfolder as the Python script:\n\n```py\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n gr.Chatbot([(\"hi\", \"hello **abubakar**\"), (\"![](/file=lion.jpg)\", \"cool pic\")])\n\ndemo.launch()\n```\n\n![Alt text](https://user-images.githubusercontent.com/1778297/204357455-5c1a4002-eee7-479d-9a1e-ba2c12522723.png)\n\nTo see a more realistic example, see the new demo `/demo/chatbot_multimodal/run.py`.\n\n###### Latex support\n\nAdded mathtext (a subset of latex) support to gr.Markdown. Added by [@kashif](https://github.com/kashif) and [@aliabid94](https://github.com/aliabid94) in [PR 2696](https://github.com/gradio-app/gradio/pull/2696).\n\nExample of how it can be used:\n\n```python\ngr.Markdown(\n r\"\"\"\n # Hello World! $\\frac{\\sqrt{x + y}}{4}$ is today's lesson.\n \"\"\")\n```\n\n###### Update Accordion properties from the backend\n\nYou can now update the Accordion `label` and `open` status with `gr.Accordion.update` by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2690](https://github.com/gradio-app/gradio/pull/2690)\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n with gr.Accordion(label=\"Open for greeting\", open=False) as accordion:\n gr.Textbox(\"Hello!\")\n open_btn = gr.Button(value=\"Open Accordion\")\n close_btn = gr.Button(value=\"Close Accordion\")\n open_btn.click(\n lambda: gr.Accordion.update(open=True, label=\"Open Accordion\"),\n inputs=None,\n outputs=[accordion],\n )\n close_btn.click(\n lambda: gr.Accordion.update(open=False, label=\"Closed Accordion\"),\n inputs=None,\n outputs=[accordion],\n )\ndemo.launch()\n```\n\n![update_accordion](https://user-images.githubusercontent.com/41651716/203164176-b102eae3-babe-4986-ae30-3ab4f400cedc.gif)\n\n### Bug Fixes:\n\n- Fixed bug where requests timeout is missing from utils.version_check() by [@yujiehecs](https://github.com/yujiehecs) in [PR 2729](https://github.com/gradio-app/gradio/pull/2729)\n- Fixed bug where so that the `File` component can properly preprocess files to \"binary\" byte-string format by [CoffeeVampir3](https://github.com/CoffeeVampir3) in [PR 2727](https://github.com/gradio-app/gradio/pull/2727)\n- Fixed bug to ensure that filenames are less than 200 characters even for non-English languages by [@SkyTNT](https://github.com/SkyTNT) in [PR 2685](https://github.com/gradio-app/gradio/pull/2685)\n\n### Documentation Changes:\n\n- Performance improvements to docs on mobile by [@aliabd](https://github.com/aliabd) in [PR 2730](https://github.com/gradio-app/gradio/pull/2730)\n\n#\n\n#\n\n### Full Changelog:\n\n- Make try examples button more prominent by [@aliabd](https://github.com/aliabd) in [PR 2705](https://github.com/gradio-app/gradio/pull/2705)\n- Fix id clashes in docs by [@aliabd](https://github.com/aliabd) in [PR 2713](https://github.com/gradio-app/gradio/pull/2713)\n- Fix typos in guide docs by [@andridns](https://github.com/andridns) in [PR 2722](https://github.com/gradio-app/gradio/pull/2722)\n- Add option to `include_audio` in Video component. 
When `True`, for `source=\"webcam\"` this will record audio and video, for `source=\"upload\"` this will retain the audio in an uploaded video by [@mandargogate](https://github.com/MandarGogate) in [PR 2721](https://github.com/gradio-app/gradio/pull/2721)\n\n### Contributors Shoutout:\n\n- [@andridns](https://github.com/andridns) made their first contribution in [PR 2722](https://github.com/gradio-app/gradio/pull/2722)!\n\n## 3.11.0\n\n### New Features:\n\n###### Upload Button\n\nThere is now a new component called the `UploadButton` which is a file upload component but in button form! You can also specify what file types it should accept in the form of a list (ex: `image`, `video`, `audio`, `text`, or generic `file`). Added by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2591](https://github.com/gradio-app/gradio/pull/2591).\n\nExample of how it can be used:\n\n```python\nimport gradio as gr\n\ndef upload_file(files):\n file_paths = [file.name for file in files]\n return file_paths\n\nwith gr.Blocks() as demo:\n file_output = gr.File()\n upload_button = gr.UploadButton(\"Click to Upload a File\", file_types=[\"image\", \"video\"], file_count=\"multiple\")\n upload_button.upload(upload_file, upload_button, file_output)\n\ndemo.launch()\n```\n\n###### Revamped API documentation page\n\nNew API Docs page with in-browser playground and updated aesthetics. [@gary149](https://github.com/gary149) in [PR 2652](https://github.com/gradio-app/gradio/pull/2652)\n\n###### Revamped Login page\n\nPreviously our login page had its own CSS, had no dark mode, and had an ugly json message on the wrong credentials. Made the page more aesthetically consistent, added dark mode support, and a nicer error message. [@aliabid94](https://github.com/aliabid94) in [PR 2684](https://github.com/gradio-app/gradio/pull/2684)\n\n###### Accessing the Requests Object Directly\n\nYou can now access the Request object directly in your Python function by [@abidlabs](https://github.com/abidlabs) in [PR 2641](https://github.com/gradio-app/gradio/pull/2641). This means that you can access request headers, the client IP address, and so on. In order to use it, add a parameter to your function and set its type hint to be `gr.Request`. Here's a simple example:\n\n```py\nimport gradio as gr\n\ndef echo(name, request: gr.Request):\n if request:\n print(\"Request headers dictionary:\", request.headers)\n print(\"IP address:\", request.client.host)\n return name\n\nio = gr.Interface(echo, \"textbox\", \"textbox\").launch()\n```\n\n### Bug Fixes:\n\n- Fixed bug that limited files from being sent over websockets to 16MB. The new limit\n is now 1GB by [@abidlabs](https://github.com/abidlabs) in [PR 2709](https://github.com/gradio-app/gradio/pull/2709)\n\n### Documentation Changes:\n\n- Updated documentation for embedding Gradio demos on Spaces as web components by\n [@julien-c](https://github.com/julien-c) in [PR 2698](https://github.com/gradio-app/gradio/pull/2698)\n- Updated IFrames in Guides to use the host URL instead of the Space name to be consistent with the new method for embedding Spaces, by\n [@julien-c](https://github.com/julien-c) in [PR 2692](https://github.com/gradio-app/gradio/pull/2692)\n- Colab buttons on every demo in the website! 
Just click open in colab, and run the demo there.\n\nhttps://user-images.githubusercontent.com/9021060/202878400-cb16ed47-f4dd-4cb0-b2f0-102a9ff64135.mov\n\n#\n\n#\n\n### Full Changelog:\n\n- Better warnings and error messages for `gr.Interface.load()` by [@abidlabs](https://github.com/abidlabs) in [PR 2694](https://github.com/gradio-app/gradio/pull/2694)\n- Add open in colab buttons to demos in docs and /demos by [@aliabd](https://github.com/aliabd) in [PR 2608](https://github.com/gradio-app/gradio/pull/2608)\n- Apply different formatting for the types in component docstrings by [@aliabd](https://github.com/aliabd) in [PR 2707](https://github.com/gradio-app/gradio/pull/2707)\n\n#\n\n## 3.10.1\n\n#\n\n### Bug Fixes:\n\n- Passes kwargs into `gr.Interface.load()` by [@abidlabs](https://github.com/abidlabs) in [PR 2669](https://github.com/gradio-app/gradio/pull/2669)\n\n#\n\n#\n\n#\n\n### Full Changelog:\n\n- Clean up printed statements in Embedded Colab Mode by [@aliabid94](https://github.com/aliabid94) in [PR 2612](https://github.com/gradio-app/gradio/pull/2612)\n\n#\n\n## 3.10.0\n\n- Add support for `'password'` and `'email'` types to `Textbox`. [@pngwn](https://github.com/pngwn) in [PR 2653](https://github.com/gradio-app/gradio/pull/2653)\n- `gr.Textbox` component will now raise an exception if `type` is not \"text\", \"email\", or \"password\" [@pngwn](https://github.com/pngwn) in [PR 2653](https://github.com/gradio-app/gradio/pull/2653). This will cause demos using the deprecated `gr.Textbox(type=\"number\")` to raise an exception.\n\n### Bug Fixes:\n\n- Updated the minimum FastApi used in tests to version 0.87 by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2647](https://github.com/gradio-app/gradio/pull/2647)\n- Fixed bug where interfaces with examples could not be loaded with `gr.Interface.load` by [@freddyaboulton](https://github.com/freddyaboulton) [PR 2640](https://github.com/gradio-app/gradio/pull/2640)\n- Fixed bug where the `interactive` property of a component could not be updated by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2639](https://github.com/gradio-app/gradio/pull/2639)\n- Fixed bug where some URLs were not being recognized as valid URLs and thus were not\n loading correctly in various components by [@abidlabs](https://github.com/abidlabs) in [PR 2659](https://github.com/gradio-app/gradio/pull/2659)\n\n### Documentation Changes:\n\n- Fix some typos in the embedded demo names in \"05_using_blocks_like_functions.md\" by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2656](https://github.com/gradio-app/gradio/pull/2656)\n\n#\n\n#\n\n### Full Changelog:\n\n- Add support for `'password'` and `'email'` types to `Textbox`. 
[@pngwn](https://github.com/pngwn) in [PR 2653](https://github.com/gradio-app/gradio/pull/2653)\n\n#\n\n## 3.9.1\n\n#\n\n### Bug Fixes:\n\n- Only set a min height on md and html when loading by [@pngwn](https://github.com/pngwn) in [PR 2623](https://github.com/gradio-app/gradio/pull/2623)\n\n### Documentation Changes:\n\n- See docs for the latest gradio commit to main as well the latest pip release:\n\n![main-vs-pip](https://user-images.githubusercontent.com/9021060/199607887-aab1ae4e-a070-4527-966d-024397abe15b.gif)\n\n- Modified the \"Connecting To a Database Guide\" to use `pd.read_sql` as opposed to low-level postgres connector by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2604](https://github.com/gradio-app/gradio/pull/2604)\n\n#\n\n#\n\n### Full Changelog:\n\n- Dropdown for seeing docs as latest or main by [@aliabd](https://github.com/aliabd) in [PR 2544](https://github.com/gradio-app/gradio/pull/2544)\n- Allow `gr.Templates` to accept parameters to override the defaults by [@abidlabs](https://github.com/abidlabs) in [PR 2600](https://github.com/gradio-app/gradio/pull/2600)\n- Components now throw a `ValueError()` if constructed with invalid parameters for `type` or `source` (for components that take those parameters) in [PR 2610](https://github.com/gradio-app/gradio/pull/2610)\n- Allow auth with using queue by [@GLGDLY](https://github.com/GLGDLY) in [PR 2611](https://github.com/gradio-app/gradio/pull/2611)\n\n#\n\n## 3.9\n\n### New Features:\n\n- Gradio is now embedded directly in colab without requiring the share link by [@aliabid94](https://github.com/aliabid94) in [PR 2455](https://github.com/gradio-app/gradio/pull/2455)\n\n###### Calling functions by api_name in loaded apps\n\nWhen you load an upstream app with `gr.Blocks.load`, you can now specify which fn\nto call with the `api_name` parameter.\n\n```python\nimport gradio as gr\nenglish_translator = gr.Blocks.load(name=\"spaces/gradio/english-translator\")\ngerman = english_translator(\"My name is Freddy\", api_name='translate-to-german')\n```\n\nThe `api_name` parameter will take precedence over the `fn_index` parameter.\n\n### Bug Fixes:\n\n- Fixed bug where None could not be used for File,Model3D, and Audio examples by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2588](https://github.com/gradio-app/gradio/pull/2588)\n- Fixed links in Plotly map guide + demo by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2578](https://github.com/gradio-app/gradio/pull/2578)\n- `gr.Blocks.load()` now correctly loads example files from Spaces [@abidlabs](https://github.com/abidlabs) in [PR 2594](https://github.com/gradio-app/gradio/pull/2594)\n- Fixed bug when image clear started upload dialog [@mezotaken](https://github.com/mezotaken) in [PR 2577](https://github.com/gradio-app/gradio/pull/2577)\n\n### Documentation Changes:\n\n- Added a Guide on how to configure the queue for maximum performance by [@abidlabs](https://github.com/abidlabs) in [PR 2558](https://github.com/gradio-app/gradio/pull/2558)\n\n#\n\n#\n\n### Full Changelog:\n\n- Add `api_name` to `Blocks.__call__` by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2593](https://github.com/gradio-app/gradio/pull/2593)\n- Update queue with using deque & update requirements by [@GLGDLY](https://github.com/GLGDLY) in [PR 2428](https://github.com/gradio-app/gradio/pull/2428)\n\n#\n\n## 3.8.2\n\n### Bug Fixes:\n\n- Ensure gradio apps embedded via spaces use the correct endpoint for predictions. 
[@pngwn](https://github.com/pngwn) in [PR 2567](https://github.com/gradio-app/gradio/pull/2567)\n- Ensure gradio apps embedded via spaces use the correct websocket protocol. [@pngwn](https://github.com/pngwn) in [PR 2571](https://github.com/gradio-app/gradio/pull/2571)\n\n### New Features:\n\n###### Running Events Continuously\n\nGradio now supports the ability to run an event continuously on a fixed schedule. To use this feature,\npass `every=# of seconds` to the event definition. This will run the event every given number of seconds!\n\nThis can be used to:\n\n- Create live visualizations that show the most up to date data\n- Refresh the state of the frontend automatically in response to changes in the backend\n\nHere is an example of a live plot that refreshes every half second:\n\n```python\nimport math\nimport gradio as gr\nimport plotly.express as px\nimport numpy as np\n\n\nplot_end = 2 * math.pi\n\n\ndef get_plot(period=1):\n global plot_end\n x = np.arange(plot_end - 2 * math.pi, plot_end, 0.02)\n y = np.sin(2*math.pi*period * x)\n fig = px.line(x=x, y=y)\n plot_end += 2 * math.pi\n return fig\n\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n gr.Markdown(\"Change the value of the slider to automatically update the plot\")\n period = gr.Slider(label=\"Period of plot\", value=1, minimum=0, maximum=10, step=1)\n plot = gr.Plot(label=\"Plot (updates every half second)\")\n\n dep = demo.load(get_plot, None, plot, every=0.5)\n period.change(get_plot, period, plot, every=0.5, cancels=[dep])\n\ndemo.queue().launch()\n```\n\n![live_demo](https://user-images.githubusercontent.com/41651716/198357377-633ce460-4e31-47bd-8202-1440cdd6fe19.gif)\n\n#\n\n### Documentation Changes:\n\n- Explained how to set up `queue` and `auth` when working with reload mode by by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3089](https://github.com/gradio-app/gradio/pull/3089)\n\n#\n\n#\n\n### Full Changelog:\n\n- Allows loading private Spaces by passing an an `api_key` to `gr.Interface.load()`\n by [@abidlabs](https://github.com/abidlabs) in [PR 2568](https://github.com/gradio-app/gradio/pull/2568)\n\n#\n\n## 3.8\n\n### New Features:\n\n- Allows event listeners to accept a single dictionary as its argument, where the keys are the components and the values are the component values. This is set by passing the input components in the event listener as a set instead of a list. [@aliabid94](https://github.com/aliabid94) in [PR 2550](https://github.com/gradio-app/gradio/pull/2550)\n\n### Bug Fixes:\n\n- Fix whitespace issue when using plotly. [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2548](https://github.com/gradio-app/gradio/pull/2548)\n- Apply appropriate alt text to all gallery images. [@camenduru](https://github.com/camenduru) in [PR 2358](https://github.com/gradio-app/gradio/pull/2538)\n- Removed erroneous tkinter import in gradio.blocks by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2555](https://github.com/gradio-app/gradio/pull/2555)\n\n#\n\n#\n\n#\n\n### Full Changelog:\n\n- Added the `every` keyword to event listeners that runs events on a fixed schedule by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2512](https://github.com/gradio-app/gradio/pull/2512)\n- Fix whitespace issue when using plotly. [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2548](https://github.com/gradio-app/gradio/pull/2548)\n- Apply appropriate alt text to all gallery images. 
[@camenduru](https://github.com/camenduru) in [PR 2358](https://github.com/gradio-app/gradio/pull/2538)\n\n#\n\n## 3.7\n\n### New Features:\n\n###### Batched Functions\n\nGradio now supports the ability to pass _batched_ functions. Batched functions are just\nfunctions which take in a list of inputs and return a list of predictions.\n\nFor example, here is a batched function that takes in two lists of inputs (a list of\nwords and a list of ints), and returns a list of trimmed words as output:\n\n```py\nimport time\n\ndef trim_words(words, lens):\n trimmed_words = []\n time.sleep(5)\n for w, l in zip(words, lens):\n trimmed_words.append(w[:l])\n return [trimmed_words]\n```\n\nThe advantage of using batched functions is that if you enable queuing, the Gradio\nserver can automatically _batch_ incoming requests and process them in parallel,\npotentially speeding up your demo. Here's what the Gradio code looks like (notice\nthe `batch=True` and `max_batch_size=16` -- both of these parameters can be passed\ninto event triggers or into the `Interface` class)\n\n```py\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n with gr.Row():\n word = gr.Textbox(label=\"word\", value=\"abc\")\n leng = gr.Number(label=\"leng\", precision=0, value=1)\n output = gr.Textbox(label=\"Output\")\n with gr.Row():\n run = gr.Button()\n\n event = run.click(trim_words, [word, leng], output, batch=True, max_batch_size=16)\n\ndemo.queue()\ndemo.launch()\n```\n\nIn the example above, 16 requests could be processed in parallel (for a total inference\ntime of 5 seconds), instead of each request being processed separately (for a total\ninference time of 80 seconds).\n\n###### Upload Event\n\n`Video`, `Audio`, `Image`, and `File` components now support a `upload()` event that is triggered when a user uploads a file into any of these components.\n\nExample usage:\n\n```py\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n with gr.Row():\n input_video = gr.Video()\n output_video = gr.Video()\n\n # Clears the output video when an input video is uploaded\n input_video.upload(lambda : None, None, output_video)\n```\n\n### Bug Fixes:\n\n- Fixes issue where plotly animations, interactivity, titles, legends, were not working properly. [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2486](https://github.com/gradio-app/gradio/pull/2486)\n- Prevent requests to the `/api` endpoint from skipping the queue if the queue is enabled for that event by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2493](https://github.com/gradio-app/gradio/pull/2493)\n- Fixes a bug with `cancels` in event triggers so that it works properly if multiple\n Blocks are rendered by [@abidlabs](https://github.com/abidlabs) in [PR 2530](https://github.com/gradio-app/gradio/pull/2530)\n- Prevent invalid targets of events from crashing the whole application. 
[@pngwn](https://github.com/pngwn) in [PR 2534](https://github.com/gradio-app/gradio/pull/2534)\n- Properly dequeue cancelled events when multiple apps are rendered by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2540](https://github.com/gradio-app/gradio/pull/2540)\n- Fixes videos being cropped due to height/width params not being used [@hannahblair](https://github.com/hannahblair) in [PR 4946](https://github.com/gradio-app/gradio/pull/4946)\n\n### Documentation Changes:\n\n- Added an example interactive dashboard to the \"Tabular & Plots\" section of the Demos page by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2508](https://github.com/gradio-app/gradio/pull/2508)\n\n#\n\n#\n\n### Full Changelog:\n\n- Fixes the error message if a user builds Gradio locally and tries to use `share=True` by [@abidlabs](https://github.com/abidlabs) in [PR 2502](https://github.com/gradio-app/gradio/pull/2502)\n- Allows the render() function to return self by [@Raul9595](https://github.com/Raul9595) in [PR 2514](https://github.com/gradio-app/gradio/pull/2514)\n- Fixes issue where plotly animations, interactivity, titles, legends, were not working properly. [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2486](https://github.com/gradio-app/gradio/pull/2486)\n- Gradio now supports batched functions by [@abidlabs](https://github.com/abidlabs) in [PR 2218](https://github.com/gradio-app/gradio/pull/2218)\n- Add `upload` event for `Video`, `Audio`, `Image`, and `File` components [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2448](https://github.com/gradio-app/gradio/pull/2456)\n- Changes websocket path for Spaces as it is no longer necessary to have a different URL for websocket connections on Spaces by [@abidlabs](https://github.com/abidlabs) in [PR 2528](https://github.com/gradio-app/gradio/pull/2528)\n- Clearer error message when events are defined outside of a Blocks scope, and a warning if you\n try to use `Series` or `Parallel` with `Blocks` by [@abidlabs](https://github.com/abidlabs) in [PR 2543](https://github.com/gradio-app/gradio/pull/2543)\n- Adds support for audio samples that are in `float64`, `float16`, or `uint16` formats by [@abidlabs](https://github.com/abidlabs) in [PR 2545](https://github.com/gradio-app/gradio/pull/2545)\n\n#\n\n## 3.6\n\n### New Features:\n\n###### Cancelling Running Events\n\nRunning events can be cancelled when other events are triggered! 
To test this feature, pass the `cancels` parameter to the event listener.\nFor this feature to work, the queue must be enabled.\n\n![cancel_on_change_rl](https://user-images.githubusercontent.com/41651716/195952623-61a606bd-e82b-4e1a-802e-223154cb8727.gif)\n\nCode:\n\n```python\nimport time\nimport gradio as gr\n\ndef fake_diffusion(steps):\n for i in range(steps):\n time.sleep(1)\n yield str(i)\n\ndef long_prediction(*args, **kwargs):\n time.sleep(10)\n return 42\n\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n n = gr.Slider(1, 10, value=9, step=1, label=\"Number Steps\")\n run = gr.Button()\n output = gr.Textbox(label=\"Iterative Output\")\n stop = gr.Button(value=\"Stop Iterating\")\n with gr.Column():\n prediction = gr.Number(label=\"Expensive Calculation\")\n run_pred = gr.Button(value=\"Run Expensive Calculation\")\n with gr.Column():\n cancel_on_change = gr.Textbox(label=\"Cancel Iteration and Expensive Calculation on Change\")\n\n click_event = run.click(fake_diffusion, n, output)\n stop.click(fn=None, inputs=None, outputs=None, cancels=[click_event])\n pred_event = run_pred.click(fn=long_prediction, inputs=None, outputs=prediction)\n\n cancel_on_change.change(None, None, None, cancels=[click_event, pred_event])\n\n\ndemo.queue(concurrency_count=1, max_size=20).launch()\n```\n\nFor interfaces, a stop button will be added automatically if the function uses a `yield` statement.\n\n```python\nimport gradio as gr\nimport time\n\ndef iteration(steps):\n for i in range(steps):\n time.sleep(0.5)\n yield i\n\ngr.Interface(iteration,\n inputs=gr.Slider(minimum=1, maximum=10, step=1, value=5),\n outputs=gr.Number()).queue().launch()\n```\n\n![stop_interface_rl](https://user-images.githubusercontent.com/41651716/195952883-e7ca4235-aae3-4852-8f28-96d01d0c5822.gif)\n\n### Bug Fixes:\n\n- Add loading status tracker UI to HTML and Markdown components. [@pngwn](https://github.com/pngwn) in [PR 2474](https://github.com/gradio-app/gradio/pull/2474)\n- Fixed videos being mirrored in the front-end if source is not webcam by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2475](https://github.com/gradio-app/gradio/pull/2475)\n- Add clear button for timeseries component [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2487](https://github.com/gradio-app/gradio/pull/2487)\n- Removes special characters from temporary filenames so that the files can be served by components [@abidlabs](https://github.com/abidlabs) in [PR 2480](https://github.com/gradio-app/gradio/pull/2480)\n- Fixed infinite reload loop when mounting gradio as a sub application by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2477](https://github.com/gradio-app/gradio/pull/2477)\n\n### Documentation Changes:\n\n- Adds a demo to show how a sound alert can be played upon completion of a prediction by [@abidlabs](https://github.com/abidlabs) in [PR 2478](https://github.com/gradio-app/gradio/pull/2478)\n\n#\n\n#\n\n### Full Changelog:\n\n- Enable running events to be cancelled from other events by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2433](https://github.com/gradio-app/gradio/pull/2433)\n- Small fix for version check before reuploading demos by [@aliabd](https://github.com/aliabd) in [PR 2469](https://github.com/gradio-app/gradio/pull/2469)\n- Add loading status tracker UI to HTML and Markdown components. 
[@pngwn](https://github.com/pngwn) in [PR 2474](https://github.com/gradio-app/gradio/pull/2474)\n- Add clear button for timeseries component [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2487](https://github.com/gradio-app/gradio/pull/2487)\n\n#\n\n## 3.5\n\n### Bug Fixes:\n\n- Ensure that Gradio does not take control of the HTML page title when embedding a gradio app as a web component; this behaviour can be flipped by adding `control_page_title=\"true\"` to the web component. [@pngwn](https://github.com/pngwn) in [PR 2400](https://github.com/gradio-app/gradio/pull/2400)\n- Decreased latency in iterative-output demos by making the iteration asynchronous [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2409](https://github.com/gradio-app/gradio/pull/2409)\n- Fixed queue getting stuck under very high load by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2374](https://github.com/gradio-app/gradio/pull/2374)\n- Ensure that components always behave as if `interactive=True` were set when the following conditions are true:\n\n - no default value is provided,\n - they are not set as the input or output of an event,\n - `interactive` kwarg is not set.\n\n [@pngwn](https://github.com/pngwn) in [PR 2459](https://github.com/gradio-app/gradio/pull/2459)\n\n### New Features:\n\n- When an `Image` component is set to `source=\"upload\"`, it is now possible to drag and drop an image to replace a previously uploaded image by [@pngwn](https://github.com/pngwn) in [PR 1711](https://github.com/gradio-app/gradio/issues/1711)\n- The `gr.Dataset` component now accepts `HTML` and `Markdown` components by [@abidlabs](https://github.com/abidlabs) in [PR 2437](https://github.com/gradio-app/gradio/pull/2437)\n\n### Documentation Changes:\n\n- Improved documentation for the `gr.Dataset` component by [@abidlabs](https://github.com/abidlabs) in [PR 2437](https://github.com/gradio-app/gradio/pull/2437)\n\n#\n\n### Breaking Changes:\n\n- The `Carousel` component is officially deprecated. Since gradio 3.0, code containing the `Carousel` component would throw warnings. 
As of the next release, the `Carousel` component will raise an exception.\n\n### Full Changelog:\n\n- Speeds up Gallery component by using temporary files instead of base64 representation in the front-end by [@proxyphi](https://github.com/proxyphi), [@pngwn](https://github.com/pngwn), and [@abidlabs](https://github.com/abidlabs) in [PR 2265](https://github.com/gradio-app/gradio/pull/2265)\n- Fixed some embedded demos in the guides by not loading the gradio web component in some guides by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2403](https://github.com/gradio-app/gradio/pull/2403)\n- When an `Image` component is set to `source=\"upload\"`, it is now possible to drag and drop and image to replace a previously uploaded image by [@pngwn](https://github.com/pngwn) in [PR 2400](https://github.com/gradio-app/gradio/pull/2410)\n- Improve documentation of the `Blocks.load()` event by [@abidlabs](https://github.com/abidlabs) in [PR 2413](https://github.com/gradio-app/gradio/pull/2413)\n- Decreased latency in iterative-output demos by making the iteration asynchronous [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2409](https://github.com/gradio-app/gradio/pull/2409)\n- Updated share link message to reference new Spaces Hardware [@abidlabs](https://github.com/abidlabs) in [PR 2423](https://github.com/gradio-app/gradio/pull/2423)\n- Automatically restart spaces if they're down by [@aliabd](https://github.com/aliabd) in [PR 2405](https://github.com/gradio-app/gradio/pull/2405)\n- Carousel component is now deprecated by [@abidlabs](https://github.com/abidlabs) in [PR 2434](https://github.com/gradio-app/gradio/pull/2434)\n- Build Gradio from source in ui tests by by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2440](https://github.com/gradio-app/gradio/pull/2440)\n- Change \"return ValueError\" to \"raise ValueError\" by [@vzakharov](https://github.com/vzakharov) in [PR 2445](https://github.com/gradio-app/gradio/pull/2445)\n- Add guide on creating a map demo using the `gr.Plot()` component [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2402](https://github.com/gradio-app/gradio/pull/2402)\n- Add blur event for `Textbox` and `Number` components [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2448](https://github.com/gradio-app/gradio/pull/2448)\n- Stops a gradio launch from hogging a port even after it's been killed [@aliabid94](https://github.com/aliabid94) in [PR 2453](https://github.com/gradio-app/gradio/pull/2453)\n- Fix embedded interfaces on touch screen devices by [@aliabd](https://github.com/aliabd) in [PR 2457](https://github.com/gradio-app/gradio/pull/2457)\n- Upload all demos to spaces by [@aliabd](https://github.com/aliabd) in [PR 2281](https://github.com/gradio-app/gradio/pull/2281)\n\n#\n\n## 3.4.1\n\n### New Features:\n\n###### 1. See Past and Upcoming Changes in the Release History \ud83d\udc40\n\nYou can now see gradio's release history directly on the website, and also keep track of upcoming changes. Just go [here](https://gradio.app/changelog/).\n\n![release-history](https://user-images.githubusercontent.com/9021060/193145458-3de699f7-7620-45de-aa73-a1c1b9b96257.gif)\n\n### Bug Fixes:\n\n1. Fix typo in guide image path by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2357](https://github.com/gradio-app/gradio/pull/2357)\n2. Raise error if Blocks has duplicate component with same IDs by [@abidlabs](https://github.com/abidlabs) in [PR 2359](https://github.com/gradio-app/gradio/pull/2359)\n3. 
Catch the permission exception on the audio component by [@Ian-GL](https://github.com/Ian-GL) in [PR 2330](https://github.com/gradio-app/gradio/pull/2330)\n4. Fix image_classifier_interface_load demo by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2365](https://github.com/gradio-app/gradio/pull/2365)\n5. Fix combining adjacent components without gaps by introducing `gr.Row(variant=\"compact\")` by [@aliabid94](https://github.com/aliabid94) in [PR 2291](https://github.com/gradio-app/gradio/pull/2291) This comes with deprecation of the following arguments for `Component.style`: `round`, `margin`, `border`.\n6. Fix audio streaming, which was previously choppy in [PR 2351](https://github.com/gradio-app/gradio/pull/2351). Big thanks to [@yannickfunk](https://github.com/yannickfunk) for the proposed solution.\n7. Fix bug where new typeable slider doesn't respect the minimum and maximum values [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2380](https://github.com/gradio-app/gradio/pull/2380)\n\n### Documentation Changes:\n\n1. New Guide: Connecting to a Database \ud83d\uddc4\ufe0f\n\n A new guide by [@freddyaboulton](https://github.com/freddyaboulton) that explains how you can use Gradio to connect your app to a database. Read more [here](https://gradio.app/connecting_to_a_database/).\n\n2. New Guide: Running Background Tasks \ud83e\udd77\n\n A new guide by [@freddyaboulton](https://github.com/freddyaboulton) that explains how you can run background tasks from your gradio app. Read more [here](https://gradio.app/running_background_tasks/).\n\n3. Small fixes to docs for `Image` component by [@abidlabs](https://github.com/abidlabs) in [PR 2372](https://github.com/gradio-app/gradio/pull/2372)\n\n#\n\n#\n\n### Full Changelog:\n\n- Create a guide on how to connect an app to a database hosted on the cloud by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2341](https://github.com/gradio-app/gradio/pull/2341)\n- Removes `analytics` dependency by [@abidlabs](https://github.com/abidlabs) in [PR 2347](https://github.com/gradio-app/gradio/pull/2347)\n- Add guide on launching background tasks from your app by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2350](https://github.com/gradio-app/gradio/pull/2350)\n- Fix typo in guide image path by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2357](https://github.com/gradio-app/gradio/pull/2357)\n- Raise error if Blocks has duplicate component with same IDs by [@abidlabs](https://github.com/abidlabs) in [PR 2359](https://github.com/gradio-app/gradio/pull/2359)\n- Hotfix: fix version back to 3.4 by [@abidlabs](https://github.com/abidlabs) in [PR 2361](https://github.com/gradio-app/gradio/pull/2361)\n- Change version.txt to 3.4 instead of 3.4.0 by [@aliabd](https://github.com/aliabd) in [PR 2363](https://github.com/gradio-app/gradio/pull/2363)\n- Catch the permission exception on the audio component by [@Ian-GL](https://github.com/Ian-GL) in [PR 2330](https://github.com/gradio-app/gradio/pull/2330)\n- Fix image_classifier_interface_load demo by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2365](https://github.com/gradio-app/gradio/pull/2365)\n- Small fixes to docs for `Image` component by [@abidlabs](https://github.com/abidlabs) in [PR 2372](https://github.com/gradio-app/gradio/pull/2372)\n- Automated Release Notes by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2306](https://github.com/gradio-app/gradio/pull/2306)\n- Fixed small typos in the docs 
[@julien-c](https://github.com/julien-c) in [PR 2373](https://github.com/gradio-app/gradio/pull/2373)\n- Adds ability to disable pre/post-processing for examples [@abidlabs](https://github.com/abidlabs) in [PR 2383](https://github.com/gradio-app/gradio/pull/2383)\n- Copy changelog file in website docker by [@aliabd](https://github.com/aliabd) in [PR 2384](https://github.com/gradio-app/gradio/pull/2384)\n- Lets users provide a `gr.update()` dictionary even if post-processing is disabled [@abidlabs](https://github.com/abidlabs) in [PR 2385](https://github.com/gradio-app/gradio/pull/2385)\n- Fix bug where errors would cause apps run in reload mode to hang forever by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2394](https://github.com/gradio-app/gradio/pull/2394)\n- Fix bug where new typeable slider doesn't respect the minimum and maximum values [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2380](https://github.com/gradio-app/gradio/pull/2380)\n\n#\n\n## 3.4\n\n### New Features:\n\n###### 1. Gallery Captions \ud83d\uddbc\ufe0f\n\nYou can now pass captions to images in the Gallery component. To do so you need to pass a {List} of (image, {str} caption) tuples. This is optional and the component also accepts just a list of the images.\n\nHere's an example:\n\n```python\nimport gradio as gr\n\nimages_with_captions = [\n (\"https://images.unsplash.com/photo-1551969014-7d2c4cddf0b6\", \"Cheetah by David Groves\"),\n (\"https://images.unsplash.com/photo-1546182990-dffeafbe841d\", \"Lion by Francesco\"),\n (\"https://images.unsplash.com/photo-1561731216-c3a4d99437d5\", \"Tiger by Mike Marrah\")\n ]\n\nwith gr.Blocks() as demo:\n gr.Gallery(value=images_with_captions)\n\ndemo.launch()\n```\n\n\"gallery_captions\"\n\n###### 2. Type Values into the Slider \ud83d\udd22\n\nYou can now type values directly on the Slider component! Here's what it looks like:\n\n![type-slider](https://user-images.githubusercontent.com/9021060/192399877-76b662a1-fede-4417-a932-fc15f0da7360.gif)\n\n###### 3. Better Sketching and Inpainting \ud83c\udfa8\n\nWe've made a lot of changes to our Image component so that it can support better sketching and inpainting.\n\nNow supports:\n\n- A standalone black-and-white sketch\n\n```python\nimport gradio as gr\ndemo = gr.Interface(lambda x: x, gr.Sketchpad(), gr.Image())\ndemo.launch()\n```\n\n![bw](https://user-images.githubusercontent.com/9021060/192410264-b08632b5-7b2a-4f86-afb0-5760e7b474cf.gif)\n\n- A standalone color sketch\n\n```python\nimport gradio as gr\ndemo = gr.Interface(lambda x: x, gr.Paint(), gr.Image())\ndemo.launch()\n```\n\n![color-sketch](https://user-images.githubusercontent.com/9021060/192410500-3c8c3e64-a5fd-4df2-a991-f0a5cef93728.gif)\n\n- An uploadable image with black-and-white or color sketching\n\n```python\nimport gradio as gr\ndemo = gr.Interface(lambda x: x, gr.Image(source='upload', tool='color-sketch'), gr.Image()) # for black and white, tool = 'sketch'\ndemo.launch()\n```\n\n![sketch-new](https://user-images.githubusercontent.com/9021060/192402422-e53cb7b6-024e-448c-87eb-d6a35a63c476.gif)\n\n- Webcam with black-and-white or color sketching\n\n```python\nimport gradio as gr\ndemo = gr.Interface(lambda x: x, gr.Image(source='webcam', tool='color-sketch'), gr.Image()) # for black and white, tool = 'sketch'\ndemo.launch()\n```\n\n![webcam-sketch](https://user-images.githubusercontent.com/9021060/192410820-0ffaf324-776e-4e1f-9de6-0fdbbf4940fa.gif)\n\nAs well as other fixes\n\n### Bug Fixes:\n\n1. 
Fix bug where max concurrency count is not respected in queue by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2286](https://github.com/gradio-app/gradio/pull/2286)\n2. fix : queue could be blocked by [@SkyTNT](https://github.com/SkyTNT) in [PR 2288](https://github.com/gradio-app/gradio/pull/2288)\n3. Supports `gr.update()` in example caching by [@abidlabs](https://github.com/abidlabs) in [PR 2309](https://github.com/gradio-app/gradio/pull/2309)\n4. Clipboard fix for iframes by [@abidlabs](https://github.com/abidlabs) in [PR 2321](https://github.com/gradio-app/gradio/pull/2321)\n5. Fix: Dataframe column headers are reset when you add a new column by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2318](https://github.com/gradio-app/gradio/pull/2318)\n6. Added support for URLs for Video, Audio, and Image by [@abidlabs](https://github.com/abidlabs) in [PR 2256](https://github.com/gradio-app/gradio/pull/2256)\n7. Add documentation about how to create and use the Gradio FastAPI app by [@abidlabs](https://github.com/abidlabs) in [PR 2263](https://github.com/gradio-app/gradio/pull/2263)\n\n### Documentation Changes:\n\n1. Adding a Playground Tab to the Website by [@aliabd](https://github.com/aliabd) in [PR 1860](https://github.com/gradio-app/gradio/pull/1860)\n2. Gradio for Tabular Data Science Workflows Guide by [@merveenoyan](https://github.com/merveenoyan) in [PR 2199](https://github.com/gradio-app/gradio/pull/2199)\n3. Promotes `postprocess` and `preprocess` to documented parameters by [@abidlabs](https://github.com/abidlabs) in [PR 2293](https://github.com/gradio-app/gradio/pull/2293)\n4. Update 2)key_features.md by [@voidxd](https://github.com/voidxd) in [PR 2326](https://github.com/gradio-app/gradio/pull/2326)\n5. Add docs to blocks context postprocessing function by [@Ian-GL](https://github.com/Ian-GL) in [PR 2332](https://github.com/gradio-app/gradio/pull/2332)\n\n### Testing and Infrastructure Changes\n\n1. Website fixes and refactoring by [@aliabd](https://github.com/aliabd) in [PR 2280](https://github.com/gradio-app/gradio/pull/2280)\n2. 
Don't deploy to spaces on release by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2313](https://github.com/gradio-app/gradio/pull/2313)\n\n### Full Changelog:\n\n- Website fixes and refactoring by [@aliabd](https://github.com/aliabd) in [PR 2280](https://github.com/gradio-app/gradio/pull/2280)\n- Fix bug where max concurrency count is not respected in queue by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2286](https://github.com/gradio-app/gradio/pull/2286)\n- Promotes `postprocess` and `preprocess` to documented parameters by [@abidlabs](https://github.com/abidlabs) in [PR 2293](https://github.com/gradio-app/gradio/pull/2293)\n- Raise warning when trying to cache examples but not all inputs have examples by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2279](https://github.com/gradio-app/gradio/pull/2279)\n- fix : queue could be blocked by [@SkyTNT](https://github.com/SkyTNT) in [PR 2288](https://github.com/gradio-app/gradio/pull/2288)\n- Don't deploy to spaces on release by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2313](https://github.com/gradio-app/gradio/pull/2313)\n- Supports `gr.update()` in example caching by [@abidlabs](https://github.com/abidlabs) in [PR 2309](https://github.com/gradio-app/gradio/pull/2309)\n- Respect Upstream Queue when loading interfaces/blocks from Spaces by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2294](https://github.com/gradio-app/gradio/pull/2294)\n- Clipboard fix for iframes by [@abidlabs](https://github.com/abidlabs) in [PR 2321](https://github.com/gradio-app/gradio/pull/2321)\n- Sketching + Inpainting Capabilities to Gradio by [@abidlabs](https://github.com/abidlabs) in [PR 2144](https://github.com/gradio-app/gradio/pull/2144)\n- Update 2)key_features.md by [@voidxd](https://github.com/voidxd) in [PR 2326](https://github.com/gradio-app/gradio/pull/2326)\n- release 3.4b3 by [@abidlabs](https://github.com/abidlabs) in [PR 2328](https://github.com/gradio-app/gradio/pull/2328)\n- Fix: Dataframe column headers are reset when you add a new column by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2318](https://github.com/gradio-app/gradio/pull/2318)\n- Start queue when gradio is a sub application by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2319](https://github.com/gradio-app/gradio/pull/2319)\n- Fix Web Tracker Script by [@aliabd](https://github.com/aliabd) in [PR 2308](https://github.com/gradio-app/gradio/pull/2308)\n- Add docs to blocks context postprocessing function by [@Ian-GL](https://github.com/Ian-GL) in [PR 2332](https://github.com/gradio-app/gradio/pull/2332)\n- Fix typo in iterator variable name in run_predict function by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2340](https://github.com/gradio-app/gradio/pull/2340)\n- Add captions to galleries by [@aliabid94](https://github.com/aliabid94) in [PR 2284](https://github.com/gradio-app/gradio/pull/2284)\n- Typeable value on gradio.Slider by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2329](https://github.com/gradio-app/gradio/pull/2329)\n\n### Contributors Shoutout:\n\n- [@SkyTNT](https://github.com/SkyTNT) made their first contribution in [PR 2288](https://github.com/gradio-app/gradio/pull/2288)\n- [@voidxd](https://github.com/voidxd) made their first contribution in [PR 2326](https://github.com/gradio-app/gradio/pull/2326)\n\n## 3.3\n\n### New Features:\n\n###### 1. 
Iterative Outputs \u23f3\n\nYou can now create an iterative output simply by having your function return a generator!\n\nHere's (part of) an example that was used to generate the interface below it. [See full code](https://colab.research.google.com/drive/1m9bWS6B82CT7bw-m4L6AJR8za7fEK7Ov?usp=sharing).\n\n```python\ndef predict(steps, seed):\n generator = torch.manual_seed(seed)\n for i in range(1,steps):\n yield pipeline(generator=generator, num_inference_steps=i)[\"sample\"][0]\n```\n\n![example](https://user-images.githubusercontent.com/9021060/189086273-f5e7087d-71fa-4158-90a9-08e84da0421c.mp4)\n\n###### 2. Accordion Layout \ud83c\udd95\n\nThis version of Gradio introduces a new layout component to Blocks: the Accordion. Wrap your elements in a neat, expandable layout that allows users to toggle them as needed.\n\nUsage: ([Read the docs](https://gradio.app/docs/#accordion))\n\n```python\nwith gr.Accordion(\"open up\"):\n# components here\n```\n\n![accordion](https://user-images.githubusercontent.com/9021060/189088465-f0ffd7f0-fc6a-42dc-9249-11c5e1e0529b.gif)\n\n###### 3. Skops Integration \ud83d\udcc8\n\nOur new integration with [skops](https://huggingface.co/blog/skops) allows you to load tabular classification and regression models directly from the [hub](https://huggingface.co/models).\n\nHere's a classification example showing how quick it is to set up an interface for a [model](https://huggingface.co/scikit-learn/tabular-playground).\n\n```python\nimport gradio as gr\ngr.Interface.load(\"models/scikit-learn/tabular-playground\").launch()\n```\n\n![187936493-5c90c01d-a6dd-400f-aa42-833a096156a1](https://user-images.githubusercontent.com/9021060/189090519-328fbcb4-120b-43c8-aa54-d6fccfa6b7e8.png)\n\n#\n\n#\n\n#\n\n#\n\n### Full Changelog:\n\n- safari fixes by [@pngwn](https://github.com/pngwn) in [PR 2138](https://github.com/gradio-app/gradio/pull/2138)\n- Fix roundedness and form borders by [@aliabid94](https://github.com/aliabid94) in [PR 2147](https://github.com/gradio-app/gradio/pull/2147)\n- Better processing of example data prior to creating dataset component by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2147](https://github.com/gradio-app/gradio/pull/2147)\n- Show error on Connection drops by [@aliabid94](https://github.com/aliabid94) in [PR 2147](https://github.com/gradio-app/gradio/pull/2147)\n- 3.2 release! 
by [@abidlabs](https://github.com/abidlabs) in [PR 2139](https://github.com/gradio-app/gradio/pull/2139)\n- Fixed Named API Requests by [@abidlabs](https://github.com/abidlabs) in [PR 2151](https://github.com/gradio-app/gradio/pull/2151)\n- Quick Fix: Cannot upload Model3D image after clearing it by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2168](https://github.com/gradio-app/gradio/pull/2168)\n- Fixed misleading log when server_name is '0.0.0.0' by [@lamhoangtung](https://github.com/lamhoangtung) in [PR 2176](https://github.com/gradio-app/gradio/pull/2176)\n- Keep embedded PngInfo metadata by [@cobryan05](https://github.com/cobryan05) in [PR 2170](https://github.com/gradio-app/gradio/pull/2170)\n- Skops integration: Load tabular classification and regression models from the hub by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2126](https://github.com/gradio-app/gradio/pull/2126)\n- Respect original filename when cached example files are downloaded by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2145](https://github.com/gradio-app/gradio/pull/2145)\n- Add manual trigger to deploy to pypi by [@abidlabs](https://github.com/abidlabs) in [PR 2192](https://github.com/gradio-app/gradio/pull/2192)\n- Fix bugs with gr.update by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2157](https://github.com/gradio-app/gradio/pull/2157)\n- Make queue per app by [@aliabid94](https://github.com/aliabid94) in [PR 2193](https://github.com/gradio-app/gradio/pull/2193)\n- Preserve Labels In Interpretation Components by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2166](https://github.com/gradio-app/gradio/pull/2166)\n- Quick Fix: Multiple file download not working by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2169](https://github.com/gradio-app/gradio/pull/2169)\n- use correct MIME type for js-script file by [@daspartho](https://github.com/daspartho) in [PR 2200](https://github.com/gradio-app/gradio/pull/2200)\n- Add accordion component by [@aliabid94](https://github.com/aliabid94) in [PR 2208](https://github.com/gradio-app/gradio/pull/2208)\n\n### Contributors Shoutout:\n\n- [@lamhoangtung](https://github.com/lamhoangtung) made their first contribution in [PR 2176](https://github.com/gradio-app/gradio/pull/2176)\n- [@cobryan05](https://github.com/cobryan05) made their first contribution in [PR 2170](https://github.com/gradio-app/gradio/pull/2170)\n- [@daspartho](https://github.com/daspartho) made their first contribution in [PR 2200](https://github.com/gradio-app/gradio/pull/2200)\n\n## 3.2\n\n### New Features:\n\n###### 1. Improvements to Queuing \ud83e\udd47\n\nWe've implemented a brand new queuing system based on **web sockets** instead of HTTP long polling. Among other things, this allows us to manage queue sizes better on Hugging Face Spaces. There are also additional queue-related parameters you can add:\n\n- Now supports concurrent workers (parallelization)\n\n```python\ndemo = gr.Interface(...)\ndemo.queue(concurrency_count=3)\ndemo.launch()\n```\n\n- Configure a maximum queue size\n\n```python\ndemo = gr.Interface(...)\ndemo.queue(max_size=100)\ndemo.launch()\n```\n\n- If a user closes their tab / browser, they leave the queue, which means the demo will run faster for everyone else\n\n###### 2. 
Fixes to Examples\n\n- Dataframe examples will render properly, and look much clearer in the UI: (thanks to PR #2125)\n\n![Screen Shot 2022-08-30 at 8 29 58 PM](https://user-images.githubusercontent.com/9021060/187586561-d915bafb-f968-4966-b9a2-ef41119692b2.png)\n\n- Image and Video thumbnails are cropped to look neater and more uniform: (thanks to PR #2109)\n\n![Screen Shot 2022-08-30 at 8 32 15 PM](https://user-images.githubusercontent.com/9021060/187586890-56e1e4f0-1b84-42d9-a82f-911772c41030.png)\n\n- Other fixes in PR #2131 and #2064 make it easier to design and use Examples\n\n###### 3. Component Fixes \ud83e\uddf1\n\n- Specify the width and height of an image in its style tag (thanks to PR #2133)\n\n```python\ncomponents.Image().style(height=260, width=300)\n```\n\n- Automatic conversion of videos so they are playable in the browser (thanks to PR #2003). Gradio will check if a video's format is playable in the browser and, if it isn't, will automatically convert it to a format that is (mp4).\n- Pass in a json filepath to the Label component (thanks to PR #2083)\n- Randomize the default value of a Slider (thanks to PR #1935)\n\n![slider-random](https://user-images.githubusercontent.com/9021060/187596230-3db9697f-9f4d-42f5-9387-d77573513448.gif)\n\n- Improvements to State in PR #2100\n\n###### 4. Ability to Randomize Input Sliders and Reload Data whenever the Page Loads\n\n- In some cases, you want to be able to show a different set of input data to every user as they load the app. For example, you might want to randomize the value of a \"seed\" `Slider` input. Or you might want to show a `Textbox` with the current date. We now support passing _functions_ as the default value in input components. When you pass in a function, it gets **re-evaluated** every time someone loads the demo, allowing you to reload / change data for different users.\n\nHere's an example loading the current date and time into an input Textbox:\n\n```python\nimport gradio as gr\nimport datetime\n\nwith gr.Blocks() as demo:\n gr.Textbox(datetime.datetime.now)\n\ndemo.launch()\n```\n\nNote that we don't evaluate the function -- `datetime.datetime.now()` -- we pass in the function itself to get this behavior -- `datetime.datetime.now`.\n\nBecause randomizing the initial value of `Slider` is a common use case, we've added a `randomize` keyword argument you can use to randomize its initial value:\n\n```python\nimport gradio as gr\ndemo = gr.Interface(lambda x:x, gr.Slider(0, 10, randomize=True), \"number\")\ndemo.launch()\n```\n\n###### 5. 
New Guide \ud83d\udd8a\ufe0f\n\n- [Gradio and W&B Integration](https://gradio.app/Gradio_and_Wandb_Integration/)\n\n### Full Changelog:\n\n- Reset components to original state by setting value to None by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2044](https://github.com/gradio-app/gradio/pull/2044)\n- Cleaning up the way data is processed for components by [@abidlabs](https://github.com/abidlabs) in [PR 1967](https://github.com/gradio-app/gradio/pull/1967)\n- version 3.1.8b by [@abidlabs](https://github.com/abidlabs) in [PR 2063](https://github.com/gradio-app/gradio/pull/2063)\n- Wandb guide by [@AK391](https://github.com/AK391) in [PR 1898](https://github.com/gradio-app/gradio/pull/1898)\n- Add a flagging callback to save json files to a hugging face dataset by [@chrisemezue](https://github.com/chrisemezue) in [PR 1821](https://github.com/gradio-app/gradio/pull/1821)\n- Add data science demos to landing page by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2067](https://github.com/gradio-app/gradio/pull/2067)\n- Hide time series + xgboost demos by default by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2079](https://github.com/gradio-app/gradio/pull/2079)\n- Encourage people to keep trying when queue full by [@apolinario](https://github.com/apolinario) in [PR 2076](https://github.com/gradio-app/gradio/pull/2076)\n- Updated our analytics on creation of Blocks/Interface by [@abidlabs](https://github.com/abidlabs) in [PR 2082](https://github.com/gradio-app/gradio/pull/2082)\n- `Label` component now accepts file paths to `.json` files by [@abidlabs](https://github.com/abidlabs) in [PR 2083](https://github.com/gradio-app/gradio/pull/2083)\n- Fix issues related to demos in Spaces by [@abidlabs](https://github.com/abidlabs) in [PR 2086](https://github.com/gradio-app/gradio/pull/2086)\n- Fix TimeSeries examples not properly displayed in UI by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2064](https://github.com/gradio-app/gradio/pull/2064)\n- Fix infinite requests when doing tab item select by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2070](https://github.com/gradio-app/gradio/pull/2070)\n- Accept deprecated `file` route as well by [@abidlabs](https://github.com/abidlabs) in [PR 2099](https://github.com/gradio-app/gradio/pull/2099)\n- Allow frontend method execution on Block.load event by [@codedealer](https://github.com/codedealer) in [PR 2108](https://github.com/gradio-app/gradio/pull/2108)\n- Improvements to `State` by [@abidlabs](https://github.com/abidlabs) in [PR 2100](https://github.com/gradio-app/gradio/pull/2100)\n- Catch IndexError, KeyError in video_is_playable by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 2113](https://github.com/gradio-app/gradio/pull/2113)\n- Fix: Download button does not respect the filepath returned by the function by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 2073](https://github.com/gradio-app/gradio/pull/2073)\n- Refactoring Layout: Adding column widths, forms, and more. 
by [@aliabid94](https://github.com/aliabid94) in [PR 2097](https://github.com/gradio-app/gradio/pull/2097)\n- Update CONTRIBUTING.md by [@abidlabs](https://github.com/abidlabs) in [PR 2118](https://github.com/gradio-app/gradio/pull/2118)\n- 2092 df ex by [@pngwn](https://github.com/pngwn) in [PR 2125](https://github.com/gradio-app/gradio/pull/2125)\n- feat(samples table/gallery): Crop thumbs to square by [@ronvoluted](https://github.com/ronvoluted) in [PR 2109](https://github.com/gradio-app/gradio/pull/2109)\n- Some enhancements to `gr.Examples` by [@abidlabs](https://github.com/abidlabs) in [PR 2131](https://github.com/gradio-app/gradio/pull/2131)\n- Image size fix by [@aliabid94](https://github.com/aliabid94) in [PR 2133](https://github.com/gradio-app/gradio/pull/2133)\n\n### Contributors Shoutout:\n\n- [@chrisemezue](https://github.com/chrisemezue) made their first contribution in [PR 1821](https://github.com/gradio-app/gradio/pull/1821)\n- [@apolinario](https://github.com/apolinario) made their first contribution in [PR 2076](https://github.com/gradio-app/gradio/pull/2076)\n- [@codedealer](https://github.com/codedealer) made their first contribution in [PR 2108](https://github.com/gradio-app/gradio/pull/2108)\n\n## 3.1\n\n### New Features:\n\n###### 1. Embedding Demos on Any Website \ud83d\udcbb\n\nWith PR #1444, Gradio is now distributed as a web component. This means demos can be natively embedded on websites. You'll just need to add two lines: one to load the gradio javascript, and one to link to the demos backend.\n\nHere's a simple example that embeds the demo from a Hugging Face space:\n\n```html\n\n\n```\n\nBut you can also embed demos that are running anywhere, you just need to link the demo to `src` instead of `space`. In fact, all the demos on the gradio website are embedded this way:\n\n\"Screen\n\nRead more in the [Embedding Gradio Demos](https://gradio.app/embedding_gradio_demos) guide.\n\n###### 2. Reload Mode \ud83d\udc68\u200d\ud83d\udcbb\n\nReload mode helps developers create gradio demos faster by automatically reloading the demo whenever the code changes. It can support development on Python IDEs (VS Code, PyCharm, etc), the terminal, as well as Jupyter notebooks.\n\nIf your demo code is in a script named `app.py`, instead of running `python app.py` you can now run `gradio app.py` and that will launch the demo in reload mode:\n\n```bash\nLaunching in reload mode on: http://127.0.0.1:7860 (Press CTRL+C to quit)\nWatching...\nWARNING: The --reload flag should not be used in production on Windows.\n```\n\nIf you're working from a Jupyter or Colab Notebook, use these magic commands instead: `%load_ext gradio` when you import gradio, and `%%blocks` in the top of the cell with the demo code. Here's an example that shows how much faster the development becomes:\n\n![Blocks](https://user-images.githubusercontent.com/9021060/178986488-ed378cc8-5141-4330-ba41-672b676863d0.gif)\n\n###### 3. Inpainting Support on `gr.Image()` \ud83c\udfa8\n\nWe updated the Image component to add support for inpainting demos. It works by adding `tool=\"sketch\"` as a parameter, that passes both an image and a sketchable mask to your prediction function.\n\nHere's an example from the [LAMA space](https://huggingface.co/spaces/akhaliq/lama):\n\n![FXApVlFVsAALSD-](https://user-images.githubusercontent.com/9021060/178989479-549867c8-7fb0-436a-a97d-1e91c9f5e611.jpeg)\n\n###### 4. 
Markdown and HTML support in Dataframes \ud83d\udd22\n\nWe upgraded the Dataframe component in PR #1684 to support rendering Markdown and HTML inside the cells.\n\nThis means you can build Dataframes that look like the following:\n\n![image (8)](https://user-images.githubusercontent.com/9021060/178991233-41cb07a5-e7a3-433e-89b8-319bc78eb9c2.png)\n\n###### 5. `gr.Examples()` for Blocks \ud83e\uddf1\n\nWe've added the `gr.Examples` component helper to allow you to add examples to any Blocks demo. This class is a wrapper over the `gr.Dataset` component.\n\n\"Screen\n\ngr.Examples takes two required parameters:\n\n- `examples` which takes in a nested list\n- `inputs` which takes in a component or list of components\n\nYou can read more in the [Examples docs](https://gradio.app/docs/#examples) or the [Adding Examples to your Demos guide](https://gradio.app/adding_examples_to_your_app/).\n\n###### 6. Fixes to Audio Streaming\n\nWith [PR 1828](https://github.com/gradio-app/gradio/pull/1828) we now hide the status loading animation, as well as remove the echo in streaming. Check out the [stream_audio](https://github.com/gradio-app/gradio/blob/main/demo/stream_audio/run.py) demo for more or read through our [Real Time Speech Recognition](https://gradio.app/real_time_speech_recognition/) guide.\n\n\"Screen\n\n### Full Changelog:\n\n- File component: list multiple files and allow for download #1446 by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 1681](https://github.com/gradio-app/gradio/pull/1681)\n- Add ColorPicker to docs by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 1768](https://github.com/gradio-app/gradio/pull/1768)\n- Mock out requests in TestRequest unit tests by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 1794](https://github.com/gradio-app/gradio/pull/1794)\n- Add requirements.txt and test_files to source dist by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 1817](https://github.com/gradio-app/gradio/pull/1817)\n- refactor: f-string for tunneling.py by [@nhankiet](https://github.com/nhankiet) in [PR 1819](https://github.com/gradio-app/gradio/pull/1819)\n- Miscellaneous formatting improvements to website by [@aliabd](https://github.com/aliabd) in [PR 1754](https://github.com/gradio-app/gradio/pull/1754)\n- `integrate()` method moved to `Blocks` by [@abidlabs](https://github.com/abidlabs) in [PR 1776](https://github.com/gradio-app/gradio/pull/1776)\n- Add python-3.7 tests by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 1818](https://github.com/gradio-app/gradio/pull/1818)\n- Copy test dir in website dockers by [@aliabd](https://github.com/aliabd) in [PR 1827](https://github.com/gradio-app/gradio/pull/1827)\n- Add info to docs on how to set default values for components by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 1788](https://github.com/gradio-app/gradio/pull/1788)\n- Embedding Components on Docs by [@aliabd](https://github.com/aliabd) in [PR 1726](https://github.com/gradio-app/gradio/pull/1726)\n- Remove usage of deprecated gr.inputs and gr.outputs from website by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 1796](https://github.com/gradio-app/gradio/pull/1796)\n- Some cleanups to the docs page by [@abidlabs](https://github.com/abidlabs) in [PR 1822](https://github.com/gradio-app/gradio/pull/1822)\n\n### Contributors Shoutout:\n\n- [@nhankiet](https://github.com/nhankiet) made their first contribution in [PR 1819](https://github.com/gradio-app/gradio/pull/1819)\n\n## 3.0\n\n###### 
\ud83d\udd25 Gradio 3.0 is the biggest update to the library, ever.\n\n### New Features:\n\n###### 1. Blocks \ud83e\uddf1\n\nBlocks is a new, low-level API that allows you to have full control over the data flows and layout of your application. It allows you to build very complex, multi-step applications. For example, you might want to:\n\n- Group together related demos as multiple tabs in one web app\n- Change the layout of your demo instead of just having all of the inputs on the left and outputs on the right\n- Have multi-step interfaces, in which the output of one model becomes the input to the next model, or have more flexible data flows in general\n- Change a component's properties (for example, the choices in a Dropdown) or its visibility based on user input\n\nHere's a simple example that creates the demo below it:\n\n```python\nimport gradio as gr\n\ndef update(name):\n return f\"Welcome to Gradio, {name}!\"\n\ndemo = gr.Blocks()\n\nwith demo:\n gr.Markdown(\n \"\"\"\n # Hello World!\n Start typing below to see the output.\n \"\"\")\n inp = gr.Textbox(placeholder=\"What is your name?\")\n out = gr.Textbox()\n\n inp.change(fn=update,\n inputs=inp,\n outputs=out)\n\ndemo.launch()\n```\n\n![hello-blocks](https://user-images.githubusercontent.com/9021060/168684108-78cbd24b-e6bd-4a04-a8d9-20d535203434.gif)\n\nRead our [Introduction to Blocks](http://gradio.app/introduction_to_blocks/) guide for more, and join the \ud83c\udf88 [Gradio Blocks Party](https://huggingface.co/spaces/Gradio-Blocks/README)!\n\n###### 2. Our Revamped Design \ud83c\udfa8\n\nWe've upgraded our design across the entire library: from components, and layouts all the way to dark mode.\n\n![kitchen_sink](https://user-images.githubusercontent.com/9021060/168686333-7a6e3096-3e23-4309-abf2-5cd7736e0463.gif)\n\n###### 3. A New Website \ud83d\udcbb\n\nWe've upgraded [gradio.app](https://gradio.app) to make it cleaner, faster and easier to use. Our docs now come with components and demos embedded directly on the page. So you can quickly get up to speed with what you're looking for.\n\n![website](https://user-images.githubusercontent.com/9021060/168687191-10d6a3bd-101f-423a-8193-48f47a5e077d.gif)\n\n###### 4. New Components: Model3D, Dataset, and More..\n\nWe've introduced a lot of new components in `3.0`, including `Model3D`, `Dataset`, `Markdown`, `Button` and `Gallery`. 
You can find all the components and play around with them [here](https://gradio.app/docs/#components).\n\n![Model3d](https://user-images.githubusercontent.com/9021060/168689062-6ad77151-8cc5-467d-916c-f7c78e52ec0c.gif)\n\n### Full Changelog:\n\n- Gradio dash fe by [@pngwn](https://github.com/pngwn) in [PR 807](https://github.com/gradio-app/gradio/pull/807)\n- Blocks components by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 765](https://github.com/gradio-app/gradio/pull/765)\n- Blocks components V2 by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 843](https://github.com/gradio-app/gradio/pull/843)\n- Blocks-Backend-Events by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 844](https://github.com/gradio-app/gradio/pull/844)\n- Interfaces from Blocks by [@aliabid94](https://github.com/aliabid94) in [PR 849](https://github.com/gradio-app/gradio/pull/849)\n- Blocks dev by [@aliabid94](https://github.com/aliabid94) in [PR 853](https://github.com/gradio-app/gradio/pull/853)\n- Started updating demos to use the new `gradio.components` syntax by [@abidlabs](https://github.com/abidlabs) in [PR 848](https://github.com/gradio-app/gradio/pull/848)\n- add test infra + add browser tests to CI by [@pngwn](https://github.com/pngwn) in [PR 852](https://github.com/gradio-app/gradio/pull/852)\n- 854 textbox by [@pngwn](https://github.com/pngwn) in [PR 859](https://github.com/gradio-app/gradio/pull/859)\n- Getting old Python unit tests to pass on `blocks-dev` by [@abidlabs](https://github.com/abidlabs) in [PR 861](https://github.com/gradio-app/gradio/pull/861)\n- initialise chatbot with empty array of messages by [@pngwn](https://github.com/pngwn) in [PR 867](https://github.com/gradio-app/gradio/pull/867)\n- add test for output to input by [@pngwn](https://github.com/pngwn) in [PR 866](https://github.com/gradio-app/gradio/pull/866)\n- More Interface -> Blocks features by [@aliabid94](https://github.com/aliabid94) in [PR 864](https://github.com/gradio-app/gradio/pull/864)\n- Fixing external.py in blocks-dev to reflect the new HF Spaces paths by [@abidlabs](https://github.com/abidlabs) in [PR 879](https://github.com/gradio-app/gradio/pull/879)\n- backend_default_value_refactoring by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 871](https://github.com/gradio-app/gradio/pull/871)\n- fix default_value by [@pngwn](https://github.com/pngwn) in [PR 869](https://github.com/gradio-app/gradio/pull/869)\n- fix buttons by [@aliabid94](https://github.com/aliabid94) in [PR 883](https://github.com/gradio-app/gradio/pull/883)\n- Checking and updating more demos to use 3.0 syntax by [@abidlabs](https://github.com/abidlabs) in [PR 892](https://github.com/gradio-app/gradio/pull/892)\n- Blocks Tests by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 902](https://github.com/gradio-app/gradio/pull/902)\n- Interface fix by [@pngwn](https://github.com/pngwn) in [PR 901](https://github.com/gradio-app/gradio/pull/901)\n- Quick fix: Issue 893 by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 907](https://github.com/gradio-app/gradio/pull/907)\n- 3d Image Component by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 775](https://github.com/gradio-app/gradio/pull/775)\n- fix endpoint url in prod by [@pngwn](https://github.com/pngwn) in [PR 911](https://github.com/gradio-app/gradio/pull/911)\n- rename Model3d to Image3D by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 912](https://github.com/gradio-app/gradio/pull/912)\n- update pypi to 2.9.1 by 
[@abidlabs](https://github.com/abidlabs) in [PR 916](https://github.com/gradio-app/gradio/pull/916)\n- blocks-with-fix by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 917](https://github.com/gradio-app/gradio/pull/917)\n- Restore Interpretation, Live, Auth, Queueing by [@aliabid94](https://github.com/aliabid94) in [PR 915](https://github.com/gradio-app/gradio/pull/915)\n- Allow `Blocks` instances to be used like a `Block` in other `Blocks` by [@abidlabs](https://github.com/abidlabs) in [PR 919](https://github.com/gradio-app/gradio/pull/919)\n- Redesign 1 by [@pngwn](https://github.com/pngwn) in [PR 918](https://github.com/gradio-app/gradio/pull/918)\n- blocks-components-tests by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 904](https://github.com/gradio-app/gradio/pull/904)\n- fix unit + browser tests by [@pngwn](https://github.com/pngwn) in [PR 926](https://github.com/gradio-app/gradio/pull/926)\n- blocks-move-test-data by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 927](https://github.com/gradio-app/gradio/pull/927)\n- remove debounce from form inputs by [@pngwn](https://github.com/pngwn) in [PR 932](https://github.com/gradio-app/gradio/pull/932)\n- reimplement webcam video by [@pngwn](https://github.com/pngwn) in [PR 928](https://github.com/gradio-app/gradio/pull/928)\n- blocks-move-test-data by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 941](https://github.com/gradio-app/gradio/pull/941)\n- allow audio components to take a string value by [@pngwn](https://github.com/pngwn) in [PR 930](https://github.com/gradio-app/gradio/pull/930)\n- static mode for textbox by [@pngwn](https://github.com/pngwn) in [PR 929](https://github.com/gradio-app/gradio/pull/929)\n- fix file upload text by [@pngwn](https://github.com/pngwn) in [PR 931](https://github.com/gradio-app/gradio/pull/931)\n- tabbed-interface-rewritten by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 958](https://github.com/gradio-app/gradio/pull/958)\n- Gan demo fix by [@abidlabs](https://github.com/abidlabs) in [PR 965](https://github.com/gradio-app/gradio/pull/965)\n- Blocks analytics by [@abidlabs](https://github.com/abidlabs) in [PR 947](https://github.com/gradio-app/gradio/pull/947)\n- Blocks page load by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 963](https://github.com/gradio-app/gradio/pull/963)\n- add frontend for page load events by [@pngwn](https://github.com/pngwn) in [PR 967](https://github.com/gradio-app/gradio/pull/967)\n- fix i18n and some tweaks by [@pngwn](https://github.com/pngwn) in [PR 966](https://github.com/gradio-app/gradio/pull/966)\n- add jinja2 to reqs by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 969](https://github.com/gradio-app/gradio/pull/969)\n- Cleaning up `Launchable()` by [@abidlabs](https://github.com/abidlabs) in [PR 968](https://github.com/gradio-app/gradio/pull/968)\n- Fix #944 by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 971](https://github.com/gradio-app/gradio/pull/971)\n- New Blocks Demo: neural instrument cloning by [@abidlabs](https://github.com/abidlabs) in [PR 975](https://github.com/gradio-app/gradio/pull/975)\n- Add huggingface_hub client library by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 973](https://github.com/gradio-app/gradio/pull/973)\n- State and variables by [@aliabid94](https://github.com/aliabid94) in [PR 977](https://github.com/gradio-app/gradio/pull/977)\n- update-components by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 
986](https://github.com/gradio-app/gradio/pull/986)\n- ensure dataframe updates as expected by [@pngwn](https://github.com/pngwn) in [PR 981](https://github.com/gradio-app/gradio/pull/981)\n- test-guideline by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 990](https://github.com/gradio-app/gradio/pull/990)\n- Issue #785: add footer by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 972](https://github.com/gradio-app/gradio/pull/972)\n- indentation fix by [@abidlabs](https://github.com/abidlabs) in [PR 993](https://github.com/gradio-app/gradio/pull/993)\n- missing quote by [@aliabd](https://github.com/aliabd) in [PR 996](https://github.com/gradio-app/gradio/pull/996)\n- added interactive parameter to components by [@abidlabs](https://github.com/abidlabs) in [PR 992](https://github.com/gradio-app/gradio/pull/992)\n- custom-components by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 985](https://github.com/gradio-app/gradio/pull/985)\n- Refactor component shortcuts by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 995](https://github.com/gradio-app/gradio/pull/995)\n- Plot Component by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 805](https://github.com/gradio-app/gradio/pull/805)\n- updated PyPi version to 2.9.2 by [@abidlabs](https://github.com/abidlabs) in [PR 1002](https://github.com/gradio-app/gradio/pull/1002)\n- Release 2.9.3 by [@abidlabs](https://github.com/abidlabs) in [PR 1003](https://github.com/gradio-app/gradio/pull/1003)\n- Image3D Examples Fix by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 1001](https://github.com/gradio-app/gradio/pull/1001)\n- release 2.9.4 by [@abidlabs](https://github.com/abidlabs) in [PR 1006](https://github.com/gradio-app/gradio/pull/1006)\n- templates import hotfix by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1008](https://github.com/gradio-app/gradio/pull/1008)\n- Progress indicator bar by [@aliabid94](https://github.com/aliabid94) in [PR 997](https://github.com/gradio-app/gradio/pull/997)\n- Fixed image input for absolute path by [@JefferyChiang](https://github.com/JefferyChiang) in [PR 1004](https://github.com/gradio-app/gradio/pull/1004)\n- Model3D + Plot Components by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 1010](https://github.com/gradio-app/gradio/pull/1010)\n- Gradio Guides: Creating CryptoPunks with GANs by [@NimaBoscarino](https://github.com/NimaBoscarino) in [PR 1000](https://github.com/gradio-app/gradio/pull/1000)\n- [BIG PR] Gradio blocks & redesigned components by [@abidlabs](https://github.com/abidlabs) in [PR 880](https://github.com/gradio-app/gradio/pull/880)\n- fixed failing test on main by [@abidlabs](https://github.com/abidlabs) in [PR 1023](https://github.com/gradio-app/gradio/pull/1023)\n- Use smaller ASR model in external test by [@abidlabs](https://github.com/abidlabs) in [PR 1024](https://github.com/gradio-app/gradio/pull/1024)\n- updated PyPi version to 2.9.0b by [@abidlabs](https://github.com/abidlabs) in [PR 1026](https://github.com/gradio-app/gradio/pull/1026)\n- Fixing import issues so that the package successfully installs on colab notebooks by [@abidlabs](https://github.com/abidlabs) in [PR 1027](https://github.com/gradio-app/gradio/pull/1027)\n- Update website tracker slackbot by [@aliabd](https://github.com/aliabd) in [PR 1037](https://github.com/gradio-app/gradio/pull/1037)\n- textbox-autoheight by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1009](https://github.com/gradio-app/gradio/pull/1009)\n- Model3D Examples 
fixes by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 1035](https://github.com/gradio-app/gradio/pull/1035)\n- GAN Gradio Guide: Adjustments to iframe heights by [@NimaBoscarino](https://github.com/NimaBoscarino) in [PR 1042](https://github.com/gradio-app/gradio/pull/1042)\n- added better default labels to form components by [@abidlabs](https://github.com/abidlabs) in [PR 1040](https://github.com/gradio-app/gradio/pull/1040)\n- Slackbot web tracker fix by [@aliabd](https://github.com/aliabd) in [PR 1043](https://github.com/gradio-app/gradio/pull/1043)\n- Plot fixes by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 1044](https://github.com/gradio-app/gradio/pull/1044)\n- Small fixes to the demos by [@abidlabs](https://github.com/abidlabs) in [PR 1030](https://github.com/gradio-app/gradio/pull/1030)\n- fixing demo issue with website by [@aliabd](https://github.com/aliabd) in [PR 1047](https://github.com/gradio-app/gradio/pull/1047)\n- [hotfix] HighlightedText by [@aliabid94](https://github.com/aliabid94) in [PR 1046](https://github.com/gradio-app/gradio/pull/1046)\n- Update text by [@ronvoluted](https://github.com/ronvoluted) in [PR 1050](https://github.com/gradio-app/gradio/pull/1050)\n- Update CONTRIBUTING.md by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1052](https://github.com/gradio-app/gradio/pull/1052)\n- fix(ui): Increase contrast for footer by [@ronvoluted](https://github.com/ronvoluted) in [PR 1048](https://github.com/gradio-app/gradio/pull/1048)\n- UI design update by [@gary149](https://github.com/gary149) in [PR 1041](https://github.com/gradio-app/gradio/pull/1041)\n- updated PyPi version to 2.9.0b8 by [@abidlabs](https://github.com/abidlabs) in [PR 1059](https://github.com/gradio-app/gradio/pull/1059)\n- Running, testing, and fixing demos by [@abidlabs](https://github.com/abidlabs) in [PR 1060](https://github.com/gradio-app/gradio/pull/1060)\n- Form layout by [@pngwn](https://github.com/pngwn) in [PR 1054](https://github.com/gradio-app/gradio/pull/1054)\n- inputless-interfaces by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1038](https://github.com/gradio-app/gradio/pull/1038)\n- Update PULL_REQUEST_TEMPLATE.md by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1068](https://github.com/gradio-app/gradio/pull/1068)\n- Upgrading node memory to 4gb in website Docker by [@aliabd](https://github.com/aliabd) in [PR 1069](https://github.com/gradio-app/gradio/pull/1069)\n- Website reload error by [@aliabd](https://github.com/aliabd) in [PR 1079](https://github.com/gradio-app/gradio/pull/1079)\n- fixed favicon issue by [@abidlabs](https://github.com/abidlabs) in [PR 1064](https://github.com/gradio-app/gradio/pull/1064)\n- remove-queue-from-events by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1056](https://github.com/gradio-app/gradio/pull/1056)\n- Enable vertex colors for OBJs files by [@radames](https://github.com/radames) in [PR 1074](https://github.com/gradio-app/gradio/pull/1074)\n- Dark text by [@ronvoluted](https://github.com/ronvoluted) in [PR 1049](https://github.com/gradio-app/gradio/pull/1049)\n- Scroll to output by [@pngwn](https://github.com/pngwn) in [PR 1077](https://github.com/gradio-app/gradio/pull/1077)\n- Explicitly list pnpm version 6 in contributing guide by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 1085](https://github.com/gradio-app/gradio/pull/1085)\n- hotfix for encrypt issue by [@abidlabs](https://github.com/abidlabs) in [PR 1096](https://github.com/gradio-app/gradio/pull/1096)\n- 
Release 2.9b9 by [@abidlabs](https://github.com/abidlabs) in [PR 1098](https://github.com/gradio-app/gradio/pull/1098)\n- tweak node circleci settings by [@pngwn](https://github.com/pngwn) in [PR 1091](https://github.com/gradio-app/gradio/pull/1091)\n- Website Reload Error by [@aliabd](https://github.com/aliabd) in [PR 1099](https://github.com/gradio-app/gradio/pull/1099)\n- Website Reload: README in demos docker by [@aliabd](https://github.com/aliabd) in [PR 1100](https://github.com/gradio-app/gradio/pull/1100)\n- Flagging fixes by [@abidlabs](https://github.com/abidlabs) in [PR 1081](https://github.com/gradio-app/gradio/pull/1081)\n- Backend for optional labels by [@abidlabs](https://github.com/abidlabs) in [PR 1080](https://github.com/gradio-app/gradio/pull/1080)\n- Optional labels fe by [@pngwn](https://github.com/pngwn) in [PR 1105](https://github.com/gradio-app/gradio/pull/1105)\n- clean-deprecated-parameters by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1090](https://github.com/gradio-app/gradio/pull/1090)\n- Blocks rendering fix by [@abidlabs](https://github.com/abidlabs) in [PR 1102](https://github.com/gradio-app/gradio/pull/1102)\n- Redos #1106 by [@abidlabs](https://github.com/abidlabs) in [PR 1112](https://github.com/gradio-app/gradio/pull/1112)\n- Interface types: handle input-only, output-only, and unified interfaces by [@abidlabs](https://github.com/abidlabs) in [PR 1108](https://github.com/gradio-app/gradio/pull/1108)\n- Hotfix + New pypi release 2.9b11 by [@abidlabs](https://github.com/abidlabs) in [PR 1118](https://github.com/gradio-app/gradio/pull/1118)\n- issue-checkbox by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1122](https://github.com/gradio-app/gradio/pull/1122)\n- issue-checkbox-hotfix by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1127](https://github.com/gradio-app/gradio/pull/1127)\n- Fix demos in website by [@aliabd](https://github.com/aliabd) in [PR 1130](https://github.com/gradio-app/gradio/pull/1130)\n- Guide for Gradio ONNX model zoo on Huggingface by [@AK391](https://github.com/AK391) in [PR 1073](https://github.com/gradio-app/gradio/pull/1073)\n- ONNX guide fixes by [@aliabd](https://github.com/aliabd) in [PR 1131](https://github.com/gradio-app/gradio/pull/1131)\n- Stacked form inputs css by [@gary149](https://github.com/gary149) in [PR 1134](https://github.com/gradio-app/gradio/pull/1134)\n- made default value in textbox empty string by [@abidlabs](https://github.com/abidlabs) in [PR 1135](https://github.com/gradio-app/gradio/pull/1135)\n- Examples UI by [@gary149](https://github.com/gary149) in [PR 1121](https://github.com/gradio-app/gradio/pull/1121)\n- Chatbot custom color support by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 1092](https://github.com/gradio-app/gradio/pull/1092)\n- highlighted text colors by [@pngwn](https://github.com/pngwn) in [PR 1119](https://github.com/gradio-app/gradio/pull/1119)\n- pin to pnpm 6 for now by [@pngwn](https://github.com/pngwn) in [PR 1147](https://github.com/gradio-app/gradio/pull/1147)\n- Restore queue in Blocks by [@aliabid94](https://github.com/aliabid94) in [PR 1137](https://github.com/gradio-app/gradio/pull/1137)\n- add select event for tabitems by [@pngwn](https://github.com/pngwn) in [PR 1154](https://github.com/gradio-app/gradio/pull/1154)\n- max_lines + autoheight for textbox by [@pngwn](https://github.com/pngwn) in [PR 1153](https://github.com/gradio-app/gradio/pull/1153)\n- use color palette for chatbot by [@pngwn](https://github.com/pngwn) in [PR 
1152](https://github.com/gradio-app/gradio/pull/1152)\n- Timeseries improvements by [@pngwn](https://github.com/pngwn) in [PR 1149](https://github.com/gradio-app/gradio/pull/1149)\n- move styling for interface panels to frontend by [@pngwn](https://github.com/pngwn) in [PR 1146](https://github.com/gradio-app/gradio/pull/1146)\n- html tweaks by [@pngwn](https://github.com/pngwn) in [PR 1145](https://github.com/gradio-app/gradio/pull/1145)\n- Issue #768: Support passing none to resize and crop image by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 1144](https://github.com/gradio-app/gradio/pull/1144)\n- image gallery component + img css by [@aliabid94](https://github.com/aliabid94) in [PR 1140](https://github.com/gradio-app/gradio/pull/1140)\n- networking tweak by [@abidlabs](https://github.com/abidlabs) in [PR 1143](https://github.com/gradio-app/gradio/pull/1143)\n- Allow enabling queue per event listener by [@aliabid94](https://github.com/aliabid94) in [PR 1155](https://github.com/gradio-app/gradio/pull/1155)\n- config hotfix and v. 2.9b23 by [@abidlabs](https://github.com/abidlabs) in [PR 1158](https://github.com/gradio-app/gradio/pull/1158)\n- Custom JS calls by [@aliabid94](https://github.com/aliabid94) in [PR 1082](https://github.com/gradio-app/gradio/pull/1082)\n- Small fixes: queue default fix, ffmpeg installation message by [@abidlabs](https://github.com/abidlabs) in [PR 1159](https://github.com/gradio-app/gradio/pull/1159)\n- formatting by [@abidlabs](https://github.com/abidlabs) in [PR 1161](https://github.com/gradio-app/gradio/pull/1161)\n- enable flex grow for gr-box by [@radames](https://github.com/radames) in [PR 1165](https://github.com/gradio-app/gradio/pull/1165)\n- 1148 loading by [@pngwn](https://github.com/pngwn) in [PR 1164](https://github.com/gradio-app/gradio/pull/1164)\n- Put enable_queue kwarg back in launch() by [@aliabid94](https://github.com/aliabid94) in [PR 1167](https://github.com/gradio-app/gradio/pull/1167)\n- A few small fixes by [@abidlabs](https://github.com/abidlabs) in [PR 1171](https://github.com/gradio-app/gradio/pull/1171)\n- Hotfix for dropdown component by [@abidlabs](https://github.com/abidlabs) in [PR 1172](https://github.com/gradio-app/gradio/pull/1172)\n- use secondary buttons in interface by [@pngwn](https://github.com/pngwn) in [PR 1173](https://github.com/gradio-app/gradio/pull/1173)\n- 1183 component height by [@pngwn](https://github.com/pngwn) in [PR 1185](https://github.com/gradio-app/gradio/pull/1185)\n- 962 dataframe by [@pngwn](https://github.com/pngwn) in [PR 1186](https://github.com/gradio-app/gradio/pull/1186)\n- update-contributing by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1188](https://github.com/gradio-app/gradio/pull/1188)\n- Table tweaks by [@pngwn](https://github.com/pngwn) in [PR 1195](https://github.com/gradio-app/gradio/pull/1195)\n- wrap tab content in column by [@pngwn](https://github.com/pngwn) in [PR 1200](https://github.com/gradio-app/gradio/pull/1200)\n- WIP: Add dark mode support by [@gary149](https://github.com/gary149) in [PR 1187](https://github.com/gradio-app/gradio/pull/1187)\n- Restored /api/predict/ endpoint for Interfaces by [@abidlabs](https://github.com/abidlabs) in [PR 1199](https://github.com/gradio-app/gradio/pull/1199)\n- hltext-label by [@pngwn](https://github.com/pngwn) in [PR 1204](https://github.com/gradio-app/gradio/pull/1204)\n- add copy functionality to json by [@pngwn](https://github.com/pngwn) in [PR 1205](https://github.com/gradio-app/gradio/pull/1205)\n- Update 
component config by [@aliabid94](https://github.com/aliabid94) in [PR 1089](https://github.com/gradio-app/gradio/pull/1089)\n- fix placeholder prompt by [@pngwn](https://github.com/pngwn) in [PR 1215](https://github.com/gradio-app/gradio/pull/1215)\n- ensure webcam video value is propagated correctly by [@pngwn](https://github.com/pngwn) in [PR 1218](https://github.com/gradio-app/gradio/pull/1218)\n- Automatic word-break in highlighted text, combine_adjacent support by [@aliabid94](https://github.com/aliabid94) in [PR 1209](https://github.com/gradio-app/gradio/pull/1209)\n- async-function-support by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1190](https://github.com/gradio-app/gradio/pull/1190)\n- Sharing fix for assets by [@aliabid94](https://github.com/aliabid94) in [PR 1208](https://github.com/gradio-app/gradio/pull/1208)\n- Hotfixes for course demos by [@abidlabs](https://github.com/abidlabs) in [PR 1222](https://github.com/gradio-app/gradio/pull/1222)\n- Allow Custom CSS by [@aliabid94](https://github.com/aliabid94) in [PR 1170](https://github.com/gradio-app/gradio/pull/1170)\n- share-hotfix by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1226](https://github.com/gradio-app/gradio/pull/1226)\n- tweaks by [@pngwn](https://github.com/pngwn) in [PR 1229](https://github.com/gradio-app/gradio/pull/1229)\n- white space for class concatenation by [@radames](https://github.com/radames) in [PR 1228](https://github.com/gradio-app/gradio/pull/1228)\n- Tweaks by [@pngwn](https://github.com/pngwn) in [PR 1230](https://github.com/gradio-app/gradio/pull/1230)\n- css tweaks by [@pngwn](https://github.com/pngwn) in [PR 1235](https://github.com/gradio-app/gradio/pull/1235)\n- ensure defaults height match for media inputs by [@pngwn](https://github.com/pngwn) in [PR 1236](https://github.com/gradio-app/gradio/pull/1236)\n- Default Label label value by [@radames](https://github.com/radames) in [PR 1239](https://github.com/gradio-app/gradio/pull/1239)\n- update-shortcut-syntax by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1234](https://github.com/gradio-app/gradio/pull/1234)\n- Update version.txt by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1244](https://github.com/gradio-app/gradio/pull/1244)\n- Layout bugs by [@pngwn](https://github.com/pngwn) in [PR 1246](https://github.com/gradio-app/gradio/pull/1246)\n- Update demo by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1253](https://github.com/gradio-app/gradio/pull/1253)\n- Button default name by [@FarukOzderim](https://github.com/FarukOzderim) in [PR 1243](https://github.com/gradio-app/gradio/pull/1243)\n- Labels spacing by [@gary149](https://github.com/gary149) in [PR 1254](https://github.com/gradio-app/gradio/pull/1254)\n- add global loader for gradio app by [@pngwn](https://github.com/pngwn) in [PR 1251](https://github.com/gradio-app/gradio/pull/1251)\n- ui apis for dalle-mini by [@pngwn](https://github.com/pngwn) in [PR 1258](https://github.com/gradio-app/gradio/pull/1258)\n- Add precision to Number, backend only by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 1125](https://github.com/gradio-app/gradio/pull/1125)\n- Website Design Changes by [@abidlabs](https://github.com/abidlabs) in [PR 1015](https://github.com/gradio-app/gradio/pull/1015)\n- Small fixes for multiple demos compatible with 3.0 by [@radames](https://github.com/radames) in [PR 1257](https://github.com/gradio-app/gradio/pull/1257)\n- Issue #1160: Model 3D component not destroyed correctly by 
[@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 1219](https://github.com/gradio-app/gradio/pull/1219)\n- Fixes to components by [@abidlabs](https://github.com/abidlabs) in [PR 1260](https://github.com/gradio-app/gradio/pull/1260)\n- layout docs by [@abidlabs](https://github.com/abidlabs) in [PR 1263](https://github.com/gradio-app/gradio/pull/1263)\n- Static forms by [@pngwn](https://github.com/pngwn) in [PR 1264](https://github.com/gradio-app/gradio/pull/1264)\n- Cdn assets by [@pngwn](https://github.com/pngwn) in [PR 1265](https://github.com/gradio-app/gradio/pull/1265)\n- update logo by [@gary149](https://github.com/gary149) in [PR 1266](https://github.com/gradio-app/gradio/pull/1266)\n- fix slider by [@aliabid94](https://github.com/aliabid94) in [PR 1268](https://github.com/gradio-app/gradio/pull/1268)\n- maybe fix auth in iframes by [@pngwn](https://github.com/pngwn) in [PR 1261](https://github.com/gradio-app/gradio/pull/1261)\n- Improves \"Getting Started\" guide by [@abidlabs](https://github.com/abidlabs) in [PR 1269](https://github.com/gradio-app/gradio/pull/1269)\n- Add embedded demos to website by [@aliabid94](https://github.com/aliabid94) in [PR 1270](https://github.com/gradio-app/gradio/pull/1270)\n- Label hotfixes by [@abidlabs](https://github.com/abidlabs) in [PR 1281](https://github.com/gradio-app/gradio/pull/1281)\n- General tweaks by [@pngwn](https://github.com/pngwn) in [PR 1276](https://github.com/gradio-app/gradio/pull/1276)\n- only affect links within the document by [@pngwn](https://github.com/pngwn) in [PR 1282](https://github.com/gradio-app/gradio/pull/1282)\n- release 3.0b9 by [@abidlabs](https://github.com/abidlabs) in [PR 1283](https://github.com/gradio-app/gradio/pull/1283)\n- Dm by [@pngwn](https://github.com/pngwn) in [PR 1284](https://github.com/gradio-app/gradio/pull/1284)\n- Website fixes by [@aliabd](https://github.com/aliabd) in [PR 1286](https://github.com/gradio-app/gradio/pull/1286)\n- Create Streamables by [@aliabid94](https://github.com/aliabid94) in [PR 1279](https://github.com/gradio-app/gradio/pull/1279)\n- ensure table works on mobile by [@pngwn](https://github.com/pngwn) in [PR 1277](https://github.com/gradio-app/gradio/pull/1277)\n- changes by [@aliabid94](https://github.com/aliabid94) in [PR 1287](https://github.com/gradio-app/gradio/pull/1287)\n- demo alignment on landing page by [@aliabd](https://github.com/aliabd) in [PR 1288](https://github.com/gradio-app/gradio/pull/1288)\n- New meta img by [@aliabd](https://github.com/aliabd) in [PR 1289](https://github.com/gradio-app/gradio/pull/1289)\n- updated PyPi version to 3.0 by [@abidlabs](https://github.com/abidlabs) in [PR 1290](https://github.com/gradio-app/gradio/pull/1290)\n- Fix site by [@aliabid94](https://github.com/aliabid94) in [PR 1291](https://github.com/gradio-app/gradio/pull/1291)\n- Mobile responsive guides by [@aliabd](https://github.com/aliabd) in [PR 1293](https://github.com/gradio-app/gradio/pull/1293)\n- Update readme by [@abidlabs](https://github.com/abidlabs) in [PR 1292](https://github.com/gradio-app/gradio/pull/1292)\n- gif by [@abidlabs](https://github.com/abidlabs) in [PR 1296](https://github.com/gradio-app/gradio/pull/1296)\n- Allow decoding headerless b64 string [@1lint](https://github.com/1lint) in [PR 4031](https://github.com/gradio-app/gradio/pull/4031)\n\n### Contributors Shoutout:\n\n- [@JefferyChiang](https://github.com/JefferyChiang) made their first contribution in [PR 1004](https://github.com/gradio-app/gradio/pull/1004)\n- 
[@NimaBoscarino](https://github.com/NimaBoscarino) made their first contribution in [PR 1000](https://github.com/gradio-app/gradio/pull/1000)\n- [@ronvoluted](https://github.com/ronvoluted) made their first contribution in [PR 1050](https://github.com/gradio-app/gradio/pull/1050)\n- [@radames](https://github.com/radames) made their first contribution in [PR 1074](https://github.com/gradio-app/gradio/pull/1074)\n- [@freddyaboulton](https://github.com/freddyaboulton) made their first contribution in [PR 1085](https://github.com/gradio-app/gradio/pull/1085)\n- [@liteli1987gmail](https://github.com/liteli1987gmail) & [@chenglu](https://github.com/chenglu) made their first contribution in [PR 4767](https://github.com/gradio-app/gradio/pull/4767)\n"} \ No newline at end of file diff --git a/js/_website/src/routes/demos/demos.json b/js/_website/src/routes/demos/demos.json index 05e29719f18d8..704de572ec5ae 100644 --- a/js/_website/src/routes/demos/demos.json +++ b/js/_website/src/routes/demos/demos.json @@ -1,172 +1 @@ -[ - { - "category": "\ud83d\udd8a\ufe0f Text & Natural Language Processing", - "demos": [ - { - "name": "Hello World", - "dir": "hello_world", - "code": "import gradio as gr\n\ndef greet(name):\n return \"Hello \" + name + \"!\"\n\ndemo = gr.Interface(fn=greet, inputs=\"text\", outputs=\"text\")\n \nif __name__ == \"__main__\":\n demo.launch() ", - "text": "The simplest possible Gradio demo. It wraps a 'Hello {name}!' function in an Interface that accepts and returns text." - }, - { - "name": "Text Generation", - "dir": "text_generation", - "code": "import gradio as gr\nfrom transformers import pipeline\n\ngenerator = pipeline('text-generation', model='gpt2')\n\ndef generate(text):\n result = generator(text, max_length=30, num_return_sequences=1)\n return result[0][\"generated_text\"]\n\nexamples = [\n [\"The Moon's orbit around Earth has\"],\n [\"The smooth Borealis basin in the Northern Hemisphere covers 40%\"],\n]\n\ndemo = gr.Interface(\n fn=generate,\n inputs=gr.inputs.Textbox(lines=5, label=\"Input Text\"),\n outputs=gr.outputs.Textbox(label=\"Generated Text\"),\n examples=examples\n)\n\ndemo.launch()\n", - "text": "This text generation demo takes in input text and returns generated text. It uses the Transformers library to set up the model and has two examples." - }, - { - "name": "Autocomplete", - "dir": "autocomplete", - "code": "import gradio as gr\nimport os\n\n# save your HF API token from https:/hf.co/settings/tokens as an env variable to avoid rate limiting\nauth_token = os.getenv(\"auth_token\")\n\n# load a model from https://hf.co/models as an interface, then use it as an api \n# you can remove the api_key parameter if you don't care about rate limiting. \napi = gr.load(\"huggingface/gpt2-xl\", hf_token=auth_token)\n\ndef complete_with_gpt(text):\n return text[:-50] + api(text[-50:])\n\nwith gr.Blocks() as demo:\n textbox = gr.Textbox(placeholder=\"Type here...\", lines=4)\n btn = gr.Button(\"Autocomplete\")\n \n # define what will run when the button is clicked, here the textbox is used as both an input and an output\n btn.click(fn=complete_with_gpt, inputs=textbox, outputs=textbox, queue=False)\n\ndemo.launch()", - "text": "This text generation demo works like autocomplete. There's only one textbox and it's used for both the input and the output. The demo loads the model as an interface, and uses that interface as an API. It then uses blocks to create the UI. All of this is done in less than 10 lines of code." 
- }, - { - "name": "Sentiment Analysis", - "dir": "sentiment_analysis", - "code": "import gradio as gr\nimport nltk\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\n\nnltk.download(\"vader_lexicon\")\nsid = SentimentIntensityAnalyzer()\n\ndef sentiment_analysis(text):\n scores = sid.polarity_scores(text)\n del scores[\"compound\"]\n return scores\n\ndemo = gr.Interface(\n fn=sentiment_analysis, \n inputs=gr.Textbox(placeholder=\"Enter a positive or negative sentence here...\"), \n outputs=\"label\", \n interpretation=\"default\",\n examples=[[\"This is wonderful!\"]])\n\ndemo.launch()", - "text": "This sentiment analaysis demo takes in input text and returns its classification for either positive, negative or neutral using Gradio's Label output. It also uses the default interpretation method so users can click the Interpret button after a submission and see which words had the biggest effect on the output." - }, - { - "name": "Named Entity Recognition", - "dir": "text_analysis", - "code": "import gradio as gr\nimport os\nos.system('python -m spacy download en_core_web_sm')\nimport spacy\nfrom spacy import displacy\n\nnlp = spacy.load(\"en_core_web_sm\")\n\ndef text_analysis(text):\n doc = nlp(text)\n html = displacy.render(doc, style=\"dep\", page=True)\n html = (\n \"
\"\n + html\n + \"
\"\n )\n pos_count = {\n \"char_count\": len(text),\n \"token_count\": 0,\n }\n pos_tokens = []\n\n for token in doc:\n pos_tokens.extend([(token.text, token.pos_), (\" \", None)])\n\n return pos_tokens, pos_count, html\n\ndemo = gr.Interface(\n text_analysis,\n gr.Textbox(placeholder=\"Enter sentence here...\"),\n [\"highlight\", \"json\", \"html\"],\n examples=[\n [\"What a beautiful morning for a walk!\"],\n [\"It was the best of times, it was the worst of times.\"],\n ],\n)\n\ndemo.launch()\n", - "text": "This simple demo takes advantage of Gradio's HighlightedText, JSON and HTML outputs to create a clear NER segmentation." - }, - { - "name": "Multilingual Translation", - "dir": "translation", - "code": "import gradio as gr\nfrom transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline\nimport torch\n\n# this model was loaded from https://hf.co/models\nmodel = AutoModelForSeq2SeqLM.from_pretrained(\"facebook/nllb-200-distilled-600M\")\ntokenizer = AutoTokenizer.from_pretrained(\"facebook/nllb-200-distilled-600M\")\ndevice = 0 if torch.cuda.is_available() else -1\nLANGS = [\"ace_Arab\", \"eng_Latn\", \"fra_Latn\", \"spa_Latn\"]\n\ndef translate(text, src_lang, tgt_lang):\n \"\"\"\n Translate the text from source lang to target lang\n \"\"\"\n translation_pipeline = pipeline(\"translation\", model=model, tokenizer=tokenizer, src_lang=src_lang, tgt_lang=tgt_lang, max_length=400, device=device)\n result = translation_pipeline(text)\n return result[0]['translation_text']\n\ndemo = gr.Interface(\n fn=translate,\n inputs=[\n gr.components.Textbox(label=\"Text\"),\n gr.components.Dropdown(label=\"Source Language\", choices=LANGS),\n gr.components.Dropdown(label=\"Target Language\", choices=LANGS),\n ],\n outputs=[\"text\"],\n examples=[[\"Building a translation demo with Gradio is so easy!\", \"eng_Latn\", \"spa_Latn\"]],\n cache_examples=False,\n title=\"Translation Demo\",\n description=\"This demo is a simplified version of the original [NLLB-Translator](https://huggingface.co/spaces/Narrativaai/NLLB-Translator) space\"\n)\n\ndemo.launch()", - "text": "This translation demo takes in the text, source and target languages, and returns the translation. It uses the Transformers library to set up the model and has a title, description, and example." - } - ] - }, - { - "category": "\ud83d\uddbc\ufe0f Images & Computer Vision", - "demos": [ - { - "name": "Image Classification", - "dir": "image_classification", - "code": "import gradio as gr\nimport torch\nimport requests\nfrom torchvision import transforms\n\nmodel = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\ndef predict(inp):\n inp = transforms.ToTensor()(inp).unsqueeze(0)\n with torch.no_grad():\n prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)\n confidences = {labels[i]: float(prediction[i]) for i in range(1000)} \n return confidences\n\ndemo = gr.Interface(fn=predict, \n inputs=gr.inputs.Image(type=\"pil\"),\n outputs=gr.outputs.Label(num_top_classes=3),\n examples=[[\"cheetah.jpg\"]],\n )\n \ndemo.launch()", - "text": "Simple image classification in Pytorch with Gradio's Image input and Label output." 
- }, - { - "name": "Image Segmentation", - "dir": "image_segmentation", - "code": "import gradio as gr\nimport numpy as np\nimport random\n\nwith gr.Blocks() as demo:\n section_labels = [\n \"apple\",\n \"banana\",\n \"carrot\",\n \"donut\",\n \"eggplant\",\n \"fish\",\n \"grapes\",\n \"hamburger\",\n \"ice cream\",\n \"juice\",\n ]\n\n with gr.Row():\n num_boxes = gr.Slider(0, 5, 2, step=1, label=\"Number of boxes\")\n num_segments = gr.Slider(0, 5, 1, step=1, label=\"Number of segments\")\n\n with gr.Row():\n img_input = gr.Image()\n img_output = gr.AnnotatedImage().style(\n color_map={\"banana\": \"#a89a00\", \"carrot\": \"#ffae00\"}\n )\n\n section_btn = gr.Button(\"Identify Sections\")\n selected_section = gr.Textbox(label=\"Selected Section\")\n\n def section(img, num_boxes, num_segments):\n sections = []\n for a in range(num_boxes):\n x = random.randint(0, img.shape[1])\n y = random.randint(0, img.shape[0])\n w = random.randint(0, img.shape[1] - x)\n h = random.randint(0, img.shape[0] - y)\n sections.append(((x, y, x + w, y + h), section_labels[a]))\n for b in range(num_segments):\n x = random.randint(0, img.shape[1])\n y = random.randint(0, img.shape[0])\n r = random.randint(0, min(x, y, img.shape[1] - x, img.shape[0] - y))\n mask = np.zeros(img.shape[:2])\n for i in range(img.shape[0]):\n for j in range(img.shape[1]):\n dist_square = (i - y) ** 2 + (j - x) ** 2\n if dist_square < r**2:\n mask[i, j] = round((r**2 - dist_square) / r**2 * 4) / 4\n sections.append((mask, section_labels[b + num_boxes]))\n return (img, sections)\n\n section_btn.click(section, [img_input, num_boxes, num_segments], img_output)\n\n def select_section(evt: gr.SelectData):\n return section_labels[evt.index]\n\n img_output.select(select_section, None, selected_section)\n\nif __name__ == \"__main__\":\n demo.launch()\n", - "text": "Simple image segmentation using gradio's AnnotatedImage component." - }, - { - "name": "Image Transformation with AnimeGAN", - "dir": "animeganv2", - "code": "import gradio as gr\nimport torch\n\nmodel2 = torch.hub.load(\n \"AK391/animegan2-pytorch:main\",\n \"generator\",\n pretrained=True,\n progress=False\n)\nmodel1 = torch.hub.load(\"AK391/animegan2-pytorch:main\", \"generator\", pretrained=\"face_paint_512_v1\")\nface2paint = torch.hub.load(\n 'AK391/animegan2-pytorch:main', 'face2paint', \n size=512,side_by_side=False\n)\n\ndef inference(img, ver):\n if ver == 'version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)':\n out = face2paint(model2, img)\n else:\n out = face2paint(model1, img)\n return out\n\ntitle = \"AnimeGANv2\"\ndescription = \"Gradio Demo for AnimeGanv2 Face Portrait. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Please use a cropped portrait picture for best results similar to the examples below.\"\narticle = \"

Github Repo Pytorch
\"\nexamples=[['groot.jpeg','version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)'],['gongyoo.jpeg','version 1 (\ud83d\udd3a stylization, \ud83d\udd3b robustness)']]\n\ndemo = gr.Interface(\n fn=inference, \n inputs=[gr.inputs.Image(type=\"pil\"),gr.inputs.Radio(['version 1 (\ud83d\udd3a stylization, \ud83d\udd3b robustness)','version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)'], type=\"value\", default='version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)', label='version')], \n outputs=gr.outputs.Image(type=\"pil\"),\n title=title,\n description=description,\n article=article,\n examples=examples)\n\ndemo.launch()", - "text": "Recreate the viral AnimeGAN image transformation demo." - }, - { - "name": "Image Generation (Fake GAN)", - "dir": "fake_gan", - "code": "# This demo needs to be run from the repo folder.\n# python demo/fake_gan/run.py\nimport random\n\nimport gradio as gr\n\n\ndef fake_gan():\n images = [\n (random.choice(\n [\n \"https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n \"https://images.unsplash.com/photo-1554151228-14d9def656e4?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=386&q=80\",\n \"https://images.unsplash.com/photo-1542909168-82c3e7fdca5c?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxzZWFyY2h8MXx8aHVtYW4lMjBmYWNlfGVufDB8fDB8fA%3D%3D&w=1000&q=80\",\n \"https://images.unsplash.com/photo-1546456073-92b9f0a8d413?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n \"https://images.unsplash.com/photo-1601412436009-d964bd02edbc?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=464&q=80\",\n ]\n ), f\"label {i}\" if i != 0 else \"label\" * 50)\n for i in range(3)\n ]\n return images\n\n\nwith gr.Blocks() as demo:\n with gr.Column(variant=\"panel\"):\n with gr.Row(variant=\"compact\"):\n text = gr.Textbox(\n label=\"Enter your prompt\",\n show_label=False,\n max_lines=1,\n placeholder=\"Enter your prompt\",\n ).style(\n container=False,\n )\n btn = gr.Button(\"Generate image\").style(full_width=False)\n\n gallery = gr.Gallery(\n label=\"Generated images\", show_label=False, elem_id=\"gallery\"\n ).style(columns=[2], rows=[2], object_fit=\"contain\", height=\"auto\")\n\n btn.click(fake_gan, None, gallery)\n\nif __name__ == \"__main__\":\n demo.launch()\n", - "text": "This is a fake GAN that shows how to create a text-to-image interface for image generation. Check out the Stable Diffusion demo for more: https://hf.co/spaces/stabilityai/stable-diffusion/" - }, - { - "name": "Iterative Output", - "dir": "fake_diffusion", - "code": "import gradio as gr\nimport numpy as np\nimport time\n\n# define core fn, which returns a generator {steps} times before returning the image\ndef fake_diffusion(steps):\n for _ in range(steps):\n time.sleep(1)\n image = np.random.random((600, 600, 3))\n yield image\n image = \"https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg\"\n yield image\n\n\ndemo = gr.Interface(fake_diffusion, inputs=gr.Slider(1, 10, 3), outputs=\"image\")\n\n# define queue - required for generators\ndemo.queue()\n\ndemo.launch()\n", - "text": "This demo uses a fake model to showcase iterative output. The Image output will update every time a generator is returned until the final image." 
- }, - { - "name": "3D Models", - "dir": "depth_estimation", - "code": "import gradio as gr\nfrom transformers import DPTFeatureExtractor, DPTForDepthEstimation\nimport torch\nimport numpy as np\nfrom PIL import Image\nimport open3d as o3d\nfrom pathlib import Path\n\nfeature_extractor = DPTFeatureExtractor.from_pretrained(\"Intel/dpt-large\")\nmodel = DPTForDepthEstimation.from_pretrained(\"Intel/dpt-large\")\n\ndef process_image(image_path):\n image_path = Path(image_path)\n image_raw = Image.open(image_path)\n image = image_raw.resize(\n (800, int(800 * image_raw.size[1] / image_raw.size[0])),\n Image.Resampling.LANCZOS)\n\n # prepare image for the model\n encoding = feature_extractor(image, return_tensors=\"pt\")\n\n # forward pass\n with torch.no_grad():\n outputs = model(**encoding)\n predicted_depth = outputs.predicted_depth\n\n # interpolate to original size\n prediction = torch.nn.functional.interpolate(\n predicted_depth.unsqueeze(1),\n size=image.size[::-1],\n mode=\"bicubic\",\n align_corners=False,\n ).squeeze()\n output = prediction.cpu().numpy()\n depth_image = (output * 255 / np.max(output)).astype('uint8')\n try:\n gltf_path = create_3d_obj(np.array(image), depth_image, image_path)\n img = Image.fromarray(depth_image)\n return [img, gltf_path, gltf_path]\n except Exception:\n gltf_path = create_3d_obj(\n np.array(image), depth_image, image_path, depth=8)\n img = Image.fromarray(depth_image)\n return [img, gltf_path, gltf_path]\n except:\n print(\"Error reconstructing 3D model\")\n raise Exception(\"Error reconstructing 3D model\")\n\n\ndef create_3d_obj(rgb_image, depth_image, image_path, depth=10):\n depth_o3d = o3d.geometry.Image(depth_image)\n image_o3d = o3d.geometry.Image(rgb_image)\n rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(\n image_o3d, depth_o3d, convert_rgb_to_intensity=False)\n w = int(depth_image.shape[1])\n h = int(depth_image.shape[0])\n\n camera_intrinsic = o3d.camera.PinholeCameraIntrinsic()\n camera_intrinsic.set_intrinsics(w, h, 500, 500, w/2, h/2)\n\n pcd = o3d.geometry.PointCloud.create_from_rgbd_image(\n rgbd_image, camera_intrinsic)\n\n print('normals')\n pcd.normals = o3d.utility.Vector3dVector(\n np.zeros((1, 3))) # invalidate existing normals\n pcd.estimate_normals(\n search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.01, max_nn=30))\n pcd.orient_normals_towards_camera_location(\n camera_location=np.array([0., 0., 1000.]))\n pcd.transform([[1, 0, 0, 0],\n [0, -1, 0, 0],\n [0, 0, -1, 0],\n [0, 0, 0, 1]])\n pcd.transform([[-1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n\n print('run Poisson surface reconstruction')\n with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Debug):\n mesh_raw, densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(\n pcd, depth=depth, width=0, scale=1.1, linear_fit=True)\n\n voxel_size = max(mesh_raw.get_max_bound() - mesh_raw.get_min_bound()) / 256\n print(f'voxel_size = {voxel_size:e}')\n mesh = mesh_raw.simplify_vertex_clustering(\n voxel_size=voxel_size,\n contraction=o3d.geometry.SimplificationContraction.Average)\n\n # vertices_to_remove = densities < np.quantile(densities, 0.001)\n # mesh.remove_vertices_by_mask(vertices_to_remove)\n bbox = pcd.get_axis_aligned_bounding_box()\n mesh_crop = mesh.crop(bbox)\n gltf_path = f'./{image_path.stem}.gltf'\n o3d.io.write_triangle_mesh(\n gltf_path, mesh_crop, write_triangle_uvs=True)\n return gltf_path\n\ntitle = \"Demo: zero-shot depth estimation with DPT + 3D Point Cloud\"\ndescription = \"This 
demo is a variation from the original DPT Demo. It uses the DPT model to predict the depth of an image and then uses 3D Point Cloud to create a 3D object.\"\nexamples = [[\"examples/1-jonathan-borba-CgWTqYxHEkg-unsplash.jpg\"]]\n\niface = gr.Interface(fn=process_image,\n inputs=[gr.Image(\n type=\"filepath\", label=\"Input Image\")],\n outputs=[gr.Image(label=\"predicted depth\", type=\"pil\"),\n gr.Model3D(label=\"3d mesh reconstruction\", clear_color=[\n 1.0, 1.0, 1.0, 1.0]),\n gr.File(label=\"3d gLTF\")],\n title=title,\n description=description,\n examples=examples,\n allow_flagging=\"never\",\n cache_examples=False)\n\niface.launch(debug=True, enable_queue=False)", - "text": "A demo for predicting the depth of an image and generating a 3D model of it." - } - ] - }, - { - "category": "\ud83d\udcc8 Tabular Data & Plots", - "demos": [ - { - "name": "Interactive Dashboard", - "dir": "dashboard", - "code": "import gradio as gr\nimport pandas as pd\nimport plotly.express as px\nfrom helpers import *\n\n\nLIBRARIES = [\"accelerate\", \"datasets\", \"diffusers\", \"evaluate\", \"gradio\", \"hub_docs\",\n \"huggingface_hub\", \"optimum\", \"pytorch_image_models\", \"tokenizers\", \"transformers\"]\n\n\ndef create_pip_plot(libraries, pip_choices):\n if \"Pip\" not in pip_choices:\n return gr.update(visible=False)\n output = retrieve_pip_installs(libraries, \"Cumulated\" in pip_choices)\n df = pd.DataFrame(output).melt(id_vars=\"day\")\n plot = px.line(df, x=\"day\", y=\"value\", color=\"variable\",\n title=\"Pip installs\")\n plot.update_layout(legend=dict(x=0.5, y=0.99), title_x=0.5, legend_title_text=\"\")\n return gr.update(value=plot, visible=True)\n\n\ndef create_star_plot(libraries, star_choices):\n if \"Stars\" not in star_choices:\n return gr.update(visible=False)\n output = retrieve_stars(libraries, \"Week over Week\" in star_choices)\n df = pd.DataFrame(output).melt(id_vars=\"day\")\n plot = px.line(df, x=\"day\", y=\"value\", color=\"variable\",\n title=\"Number of stargazers\")\n plot.update_layout(legend=dict(x=0.5, y=0.99), title_x=0.5, legend_title_text=\"\")\n return gr.update(value=plot, visible=True)\n\n\ndef create_issue_plot(libraries, issue_choices):\n if \"Issue\" not in issue_choices:\n return gr.update(visible=False)\n output = retrieve_issues(libraries,\n exclude_org_members=\"Exclude org members\" in issue_choices,\n week_over_week=\"Week over Week\" in issue_choices)\n df = pd.DataFrame(output).melt(id_vars=\"day\")\n plot = px.line(df, x=\"day\", y=\"value\", color=\"variable\",\n title=\"Cumulated number of issues, PRs, and comments\",\n )\n plot.update_layout(legend=dict(x=0.5, y=0.99), title_x=0.5, legend_title_text=\"\")\n return gr.update(value=plot, visible=True)\n\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n with gr.Box():\n gr.Markdown(\"## Select libraries to display\")\n libraries = gr.CheckboxGroup(choices=LIBRARIES, label=\"\")\n with gr.Column():\n with gr.Box():\n gr.Markdown(\"## Select graphs to display\")\n pip = gr.CheckboxGroup(choices=[\"Pip\", \"Cumulated\"], label=\"\")\n stars = gr.CheckboxGroup(choices=[\"Stars\", \"Week over Week\"], label=\"\")\n issues = gr.CheckboxGroup(choices=[\"Issue\", \"Exclude org members\", \"week over week\"], label=\"\")\n with gr.Row():\n fetch = gr.Button(value=\"Fetch\")\n with gr.Row():\n with gr.Column():\n pip_plot = gr.Plot(visible=False)\n star_plot = gr.Plot(visible=False)\n issue_plot = gr.Plot(visible=False)\n\n fetch.click(create_pip_plot, inputs=[libraries, pip], 
outputs=pip_plot)\n fetch.click(create_star_plot, inputs=[libraries, stars], outputs=star_plot)\n fetch.click(create_issue_plot, inputs=[libraries, issues], outputs=issue_plot)\n\n\nif __name__ == \"__main__\":\n demo.launch()", - "text": "This demo shows how you can build an interactive dashboard with gradio. Click on a python library on the left hand side and then on the right hand side click on the metric you'd like to see plot over time. Data is pulled from HuggingFace Hub datasets." - }, - { - "name": "Dashboard with Live Updates", - "dir": "live_dashboard", - "code": "import math\n\nimport pandas as pd\n\nimport gradio as gr\nimport datetime\nimport numpy as np\n\n\ndef get_time():\n return datetime.datetime.now()\n\n\nplot_end = 2 * math.pi\n\n\ndef get_plot(period=1):\n global plot_end\n x = np.arange(plot_end - 2 * math.pi, plot_end, 0.02)\n y = np.sin(2 * math.pi * period * x)\n update = gr.LinePlot.update(\n value=pd.DataFrame({\"x\": x, \"y\": y}),\n x=\"x\",\n y=\"y\",\n title=\"Plot (updates every second)\",\n width=600,\n height=350,\n )\n plot_end += 2 * math.pi\n if plot_end > 1000:\n plot_end = 2 * math.pi\n return update\n\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n c_time2 = gr.Textbox(label=\"Current Time refreshed every second\")\n gr.Textbox(\n \"Change the value of the slider to automatically update the plot\",\n label=\"\",\n )\n period = gr.Slider(\n label=\"Period of plot\", value=1, minimum=0, maximum=10, step=1\n )\n plot = gr.LinePlot(show_label=False)\n with gr.Column():\n name = gr.Textbox(label=\"Enter your name\")\n greeting = gr.Textbox(label=\"Greeting\")\n button = gr.Button(value=\"Greet\")\n button.click(lambda s: f\"Hello {s}\", name, greeting)\n\n demo.load(lambda: datetime.datetime.now(), None, c_time2, every=1)\n dep = demo.load(get_plot, None, plot, every=1)\n period.change(get_plot, period, plot, every=1, cancels=[dep])\n\nif __name__ == \"__main__\":\n demo.queue().launch()\n", - "text": "This demo shows how you can build a live interactive dashboard with gradio.\nThe current time is refreshed every second and the plot every half second by using the 'every' keyword in the event handler.\nChanging the value of the slider will control the period of the sine curve (the distance between peaks). " - }, - { - "name": "Interactive Map of AirBnB Locations", - "dir": "map_airbnb", - "code": "import gradio as gr\nimport plotly.graph_objects as go\nfrom datasets import load_dataset\n\ndataset = load_dataset(\"gradio/NYC-Airbnb-Open-Data\", split=\"train\")\ndf = dataset.to_pandas()\n\ndef filter_map(min_price, max_price, boroughs):\n\n filtered_df = df[(df['neighbourhood_group'].isin(boroughs)) & \n (df['price'] > min_price) & (df['price'] < max_price)]\n names = filtered_df[\"name\"].tolist()\n prices = filtered_df[\"price\"].tolist()\n text_list = [(names[i], prices[i]) for i in range(0, len(names))]\n fig = go.Figure(go.Scattermapbox(\n customdata=text_list,\n lat=filtered_df['latitude'].tolist(),\n lon=filtered_df['longitude'].tolist(),\n mode='markers',\n marker=go.scattermapbox.Marker(\n size=6\n ),\n hoverinfo=\"text\",\n hovertemplate='Name: %{customdata[0]}
Price: $%{customdata[1]}'\n ))\n\n fig.update_layout(\n mapbox_style=\"open-street-map\",\n hovermode='closest',\n mapbox=dict(\n bearing=0,\n center=go.layout.mapbox.Center(\n lat=40.67,\n lon=-73.90\n ),\n pitch=0,\n zoom=9\n ),\n )\n\n return fig\n\nwith gr.Blocks() as demo:\n with gr.Column():\n with gr.Row():\n min_price = gr.Number(value=250, label=\"Minimum Price\")\n max_price = gr.Number(value=1000, label=\"Maximum Price\")\n boroughs = gr.CheckboxGroup(choices=[\"Queens\", \"Brooklyn\", \"Manhattan\", \"Bronx\", \"Staten Island\"], value=[\"Queens\", \"Brooklyn\"], label=\"Select Boroughs:\")\n btn = gr.Button(value=\"Update Filter\")\n map = gr.Plot().style()\n demo.load(filter_map, [min_price, max_price, boroughs], map)\n btn.click(filter_map, [min_price, max_price, boroughs], map)\n\nif __name__ == \"__main__\":\n demo.launch()", - "text": "Display an interactive map of AirBnB locations with Plotly. Data is hosted on HuggingFace Datasets. " - }, - { - "name": "Outbreak Forecast", - "dir": "outbreak_forecast", - "code": "import altair\n\nimport gradio as gr\nfrom math import sqrt\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport plotly.express as px\nimport pandas as pd\n\n\ndef outbreak(plot_type, r, month, countries, social_distancing):\n months = [\"January\", \"February\", \"March\", \"April\", \"May\"]\n m = months.index(month)\n start_day = 30 * m\n final_day = 30 * (m + 1)\n x = np.arange(start_day, final_day + 1)\n pop_count = {\"USA\": 350, \"Canada\": 40, \"Mexico\": 300, \"UK\": 120}\n if social_distancing:\n r = sqrt(r)\n df = pd.DataFrame({\"day\": x})\n for country in countries:\n df[country] = x ** (r) * (pop_count[country] + 1)\n\n if plot_type == \"Matplotlib\":\n fig = plt.figure()\n plt.plot(df[\"day\"], df[countries].to_numpy())\n plt.title(\"Outbreak in \" + month)\n plt.ylabel(\"Cases\")\n plt.xlabel(\"Days since Day 0\")\n plt.legend(countries)\n return fig\n elif plot_type == \"Plotly\":\n fig = px.line(df, x=\"day\", y=countries)\n fig.update_layout(\n title=\"Outbreak in \" + month,\n xaxis_title=\"Cases\",\n yaxis_title=\"Days Since Day 0\",\n )\n return fig\n elif plot_type == \"Altair\":\n df = df.melt(id_vars=\"day\").rename(columns={\"variable\": \"country\"})\n fig = altair.Chart(df).mark_line().encode(x=\"day\", y='value', color='country')\n return fig\n else:\n raise ValueError(\"A plot type must be selected\")\n\n\ninputs = [\n gr.Dropdown([\"Matplotlib\", \"Plotly\", \"Altair\"], label=\"Plot Type\"),\n gr.Slider(1, 4, 3.2, label=\"R\"),\n gr.Dropdown([\"January\", \"February\", \"March\", \"April\", \"May\"], label=\"Month\"),\n gr.CheckboxGroup(\n [\"USA\", \"Canada\", \"Mexico\", \"UK\"], label=\"Countries\", value=[\"USA\", \"Canada\"]\n ),\n gr.Checkbox(label=\"Social Distancing?\"),\n]\noutputs = gr.Plot()\n\ndemo = gr.Interface(\n fn=outbreak,\n inputs=inputs,\n outputs=outputs,\n examples=[\n [\"Matplotlib\", 2, \"March\", [\"Mexico\", \"UK\"], True],\n [\"Altair\", 2, \"March\", [\"Mexico\", \"Canada\"], True],\n [\"Plotly\", 3.6, \"February\", [\"Canada\", \"Mexico\", \"UK\"], False],\n ],\n cache_examples=True,\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n\n\n\n", - "text": "Generate a plot based on 5 inputs." 
- }, - { - "name": "Clustering with Scikit-Learn", - "dir": "clustering", - "code": "import gradio as gr\nimport math\nfrom functools import partial\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.cluster import (\n AgglomerativeClustering, Birch, DBSCAN, KMeans, MeanShift, OPTICS, SpectralClustering, estimate_bandwidth\n)\nfrom sklearn.datasets import make_blobs, make_circles, make_moons\nfrom sklearn.mixture import GaussianMixture\nfrom sklearn.neighbors import kneighbors_graph\nfrom sklearn.preprocessing import StandardScaler\n\nplt.style.use('seaborn')\nSEED = 0\nMAX_CLUSTERS = 10\nN_SAMPLES = 1000\nN_COLS = 3\nFIGSIZE = 7, 7 # does not affect size in webpage\nCOLORS = [\n 'blue', 'orange', 'green', 'red', 'purple', 'brown', 'pink', 'gray', 'olive', 'cyan'\n]\nassert len(COLORS) >= MAX_CLUSTERS, \"Not enough different colors for all clusters\"\nnp.random.seed(SEED)\n\n\ndef normalize(X):\n return StandardScaler().fit_transform(X)\n\ndef get_regular(n_clusters):\n # spiral pattern\n centers = [\n [0, 0],\n [1, 0],\n [1, 1],\n [0, 1],\n [-1, 1],\n [-1, 0],\n [-1, -1],\n [0, -1],\n [1, -1],\n [2, -1],\n ][:n_clusters]\n assert len(centers) == n_clusters\n X, labels = make_blobs(n_samples=N_SAMPLES, centers=centers, cluster_std=0.25, random_state=SEED)\n return normalize(X), labels\n\n\ndef get_circles(n_clusters):\n X, labels = make_circles(n_samples=N_SAMPLES, factor=0.5, noise=0.05, random_state=SEED)\n return normalize(X), labels\n\n\ndef get_moons(n_clusters):\n X, labels = make_moons(n_samples=N_SAMPLES, noise=0.05, random_state=SEED)\n return normalize(X), labels\n\n\ndef get_noise(n_clusters):\n np.random.seed(SEED)\n X, labels = np.random.rand(N_SAMPLES, 2), np.random.randint(0, n_clusters, size=(N_SAMPLES,))\n return normalize(X), labels\n\n\ndef get_anisotropic(n_clusters):\n X, labels = make_blobs(n_samples=N_SAMPLES, centers=n_clusters, random_state=170)\n transformation = [[0.6, -0.6], [-0.4, 0.8]]\n X = np.dot(X, transformation)\n return X, labels\n\n\ndef get_varied(n_clusters):\n cluster_std = [1.0, 2.5, 0.5, 1.0, 2.5, 0.5, 1.0, 2.5, 0.5, 1.0][:n_clusters]\n assert len(cluster_std) == n_clusters\n X, labels = make_blobs(\n n_samples=N_SAMPLES, centers=n_clusters, cluster_std=cluster_std, random_state=SEED\n )\n return normalize(X), labels\n\n\ndef get_spiral(n_clusters):\n # from https://scikit-learn.org/stable/auto_examples/cluster/plot_agglomerative_clustering.html\n np.random.seed(SEED)\n t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, N_SAMPLES))\n x = t * np.cos(t)\n y = t * np.sin(t)\n X = np.concatenate((x, y))\n X += 0.7 * np.random.randn(2, N_SAMPLES)\n X = np.ascontiguousarray(X.T)\n\n labels = np.zeros(N_SAMPLES, dtype=int)\n return normalize(X), labels\n\n\nDATA_MAPPING = {\n 'regular': get_regular,\n 'circles': get_circles,\n 'moons': get_moons,\n 'spiral': get_spiral,\n 'noise': get_noise,\n 'anisotropic': get_anisotropic,\n 'varied': get_varied,\n}\n\n\ndef get_groundtruth_model(X, labels, n_clusters, **kwargs):\n # dummy model to show true label distribution\n class Dummy:\n def __init__(self, y):\n self.labels_ = labels\n\n return Dummy(labels)\n\n\ndef get_kmeans(X, labels, n_clusters, **kwargs):\n model = KMeans(init=\"k-means++\", n_clusters=n_clusters, n_init=10, random_state=SEED)\n model.set_params(**kwargs)\n return model.fit(X)\n\n\ndef get_dbscan(X, labels, n_clusters, **kwargs):\n model = DBSCAN(eps=0.3)\n model.set_params(**kwargs)\n return model.fit(X)\n\n\ndef get_agglomerative(X, labels, n_clusters, **kwargs):\n connectivity = 
kneighbors_graph(\n X, n_neighbors=n_clusters, include_self=False\n )\n # make connectivity symmetric\n connectivity = 0.5 * (connectivity + connectivity.T)\n model = AgglomerativeClustering(\n n_clusters=n_clusters, linkage=\"ward\", connectivity=connectivity\n )\n model.set_params(**kwargs)\n return model.fit(X)\n\n\ndef get_meanshift(X, labels, n_clusters, **kwargs):\n bandwidth = estimate_bandwidth(X, quantile=0.25)\n model = MeanShift(bandwidth=bandwidth, bin_seeding=True)\n model.set_params(**kwargs)\n return model.fit(X)\n\n\ndef get_spectral(X, labels, n_clusters, **kwargs):\n model = SpectralClustering(\n n_clusters=n_clusters,\n eigen_solver=\"arpack\",\n affinity=\"nearest_neighbors\",\n )\n model.set_params(**kwargs)\n return model.fit(X)\n\n\ndef get_optics(X, labels, n_clusters, **kwargs):\n model = OPTICS(\n min_samples=7,\n xi=0.05,\n min_cluster_size=0.1,\n )\n model.set_params(**kwargs)\n return model.fit(X)\n\n\ndef get_birch(X, labels, n_clusters, **kwargs):\n model = Birch(n_clusters=n_clusters)\n model.set_params(**kwargs)\n return model.fit(X)\n\n\ndef get_gaussianmixture(X, labels, n_clusters, **kwargs):\n model = GaussianMixture(\n n_components=n_clusters, covariance_type=\"full\", random_state=SEED,\n )\n model.set_params(**kwargs)\n return model.fit(X)\n\n\nMODEL_MAPPING = {\n 'True labels': get_groundtruth_model,\n 'KMeans': get_kmeans,\n 'DBSCAN': get_dbscan,\n 'MeanShift': get_meanshift,\n 'SpectralClustering': get_spectral,\n 'OPTICS': get_optics,\n 'Birch': get_birch,\n 'GaussianMixture': get_gaussianmixture,\n 'AgglomerativeClustering': get_agglomerative,\n}\n\n\ndef plot_clusters(ax, X, labels):\n set_clusters = set(labels)\n set_clusters.discard(-1) # -1 signifiies outliers, which we plot separately\n for label, color in zip(sorted(set_clusters), COLORS):\n idx = labels == label\n if not sum(idx):\n continue\n ax.scatter(X[idx, 0], X[idx, 1], color=color)\n\n # show outliers (if any)\n idx = labels == -1\n if sum(idx):\n ax.scatter(X[idx, 0], X[idx, 1], c='k', marker='x')\n\n ax.grid(None)\n ax.set_xticks([])\n ax.set_yticks([])\n return ax\n\n\ndef cluster(dataset: str, n_clusters: int, clustering_algorithm: str):\n if isinstance(n_clusters, dict):\n n_clusters = n_clusters['value']\n else:\n n_clusters = int(n_clusters)\n\n X, labels = DATA_MAPPING[dataset](n_clusters)\n model = MODEL_MAPPING[clustering_algorithm](X, labels, n_clusters=n_clusters)\n if hasattr(model, \"labels_\"):\n y_pred = model.labels_.astype(int)\n else:\n y_pred = model.predict(X)\n\n fig, ax = plt.subplots(figsize=FIGSIZE)\n\n plot_clusters(ax, X, y_pred)\n ax.set_title(clustering_algorithm, fontsize=16)\n\n return fig\n\n\ntitle = \"Clustering with Scikit-learn\"\ndescription = (\n \"This example shows how different clustering algorithms work. Simply pick \"\n \"the dataset and the number of clusters to see how the clustering algorithms work. 
\"\n \"Colored circles are (predicted) labels and black x are outliers.\"\n)\n\n\ndef iter_grid(n_rows, n_cols):\n # create a grid using gradio Block\n for _ in range(n_rows):\n with gr.Row():\n for _ in range(n_cols):\n with gr.Column():\n yield\n\nwith gr.Blocks(title=title) as demo:\n gr.HTML(f\"{title}\")\n gr.Markdown(description)\n\n input_models = list(MODEL_MAPPING)\n input_data = gr.Radio(\n list(DATA_MAPPING),\n value=\"regular\",\n label=\"dataset\"\n )\n input_n_clusters = gr.Slider(\n minimum=1,\n maximum=MAX_CLUSTERS,\n value=4,\n step=1,\n label='Number of clusters'\n )\n n_rows = int(math.ceil(len(input_models) / N_COLS))\n counter = 0\n for _ in iter_grid(n_rows, N_COLS):\n if counter >= len(input_models):\n break\n\n input_model = input_models[counter]\n plot = gr.Plot(label=input_model)\n fn = partial(cluster, clustering_algorithm=input_model)\n input_data.change(fn=fn, inputs=[input_data, input_n_clusters], outputs=plot)\n input_n_clusters.change(fn=fn, inputs=[input_data, input_n_clusters], outputs=plot)\n counter += 1\n\ndemo.launch()\n", - "text": "This demo built with Blocks generates 9 plots based on the input." - }, - { - "name": "Time Series Forecasting", - "dir": "timeseries-forecasting-with-prophet", - "code": "import gradio as gr\nimport pypistats\nfrom datetime import date\nfrom dateutil.relativedelta import relativedelta\nimport pandas as pd\nfrom prophet import Prophet\npd.options.plotting.backend = \"plotly\"\n\ndef get_forecast(lib, time):\n\n data = pypistats.overall(lib, total=True, format=\"pandas\")\n data = data.groupby(\"category\").get_group(\"with_mirrors\").sort_values(\"date\")\n start_date = date.today() - relativedelta(months=int(time.split(\" \")[0]))\n df = data[(data['date'] > str(start_date))] \n\n df1 = df[['date','downloads']]\n df1.columns = ['ds','y']\n\n m = Prophet()\n m.fit(df1)\n future = m.make_future_dataframe(periods=90)\n forecast = m.predict(future)\n fig1 = m.plot(forecast)\n return fig1 \n\nwith gr.Blocks() as demo:\n gr.Markdown(\n \"\"\"\n **Pypi Download Stats \ud83d\udcc8 with Prophet Forecasting**: see live download stats for popular open-source libraries \ud83e\udd17 along with a 3 month forecast using Prophet. The [ source code for this Gradio demo is here](https://huggingface.co/spaces/gradio/timeseries-forecasting-with-prophet/blob/main/app.py).\n \"\"\")\n with gr.Row():\n lib = gr.Dropdown([\"pandas\", \"scikit-learn\", \"torch\", \"prophet\"], label=\"Library\", value=\"pandas\")\n time = gr.Dropdown([\"3 months\", \"6 months\", \"9 months\", \"12 months\"], label=\"Downloads over the last...\", value=\"12 months\")\n\n plt = gr.Plot()\n\n lib.change(get_forecast, [lib, time], plt, queue=False)\n time.change(get_forecast, [lib, time], plt, queue=False) \n demo.load(get_forecast, [lib, time], plt, queue=False) \n\ndemo.launch()", - "text": "A simple dashboard showing pypi stats for python libraries. Updates on load, and has no buttons!" 
- }, - { - "name": "Income Classification with XGBoost", - "dir": "xgboost-income-prediction-with-explainability", - "code": "import gradio as gr\nimport random\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport shap\nimport xgboost as xgb\nfrom datasets import load_dataset\n\n\ndataset = load_dataset(\"scikit-learn/adult-census-income\")\nX_train = dataset[\"train\"].to_pandas()\n_ = X_train.pop(\"fnlwgt\")\n_ = X_train.pop(\"race\")\ny_train = X_train.pop(\"income\")\ny_train = (y_train == \">50K\").astype(int)\ncategorical_columns = [\n \"workclass\",\n \"education\",\n \"marital.status\",\n \"occupation\",\n \"relationship\",\n \"sex\",\n \"native.country\",\n]\nX_train = X_train.astype({col: \"category\" for col in categorical_columns})\ndata = xgb.DMatrix(X_train, label=y_train, enable_categorical=True)\nmodel = xgb.train(params={\"objective\": \"binary:logistic\"}, dtrain=data)\nexplainer = shap.TreeExplainer(model)\n\ndef predict(*args):\n df = pd.DataFrame([args], columns=X_train.columns)\n df = df.astype({col: \"category\" for col in categorical_columns})\n pos_pred = model.predict(xgb.DMatrix(df, enable_categorical=True))\n return {\">50K\": float(pos_pred[0]), \"<=50K\": 1 - float(pos_pred[0])}\n\n\ndef interpret(*args):\n df = pd.DataFrame([args], columns=X_train.columns)\n df = df.astype({col: \"category\" for col in categorical_columns})\n shap_values = explainer.shap_values(xgb.DMatrix(df, enable_categorical=True))\n scores_desc = list(zip(shap_values[0], X_train.columns))\n scores_desc = sorted(scores_desc)\n fig_m = plt.figure(tight_layout=True)\n plt.barh([s[1] for s in scores_desc], [s[0] for s in scores_desc])\n plt.title(\"Feature Shap Values\")\n plt.ylabel(\"Shap Value\")\n plt.xlabel(\"Feature\")\n plt.tight_layout()\n return fig_m\n\n\nunique_class = sorted(X_train[\"workclass\"].unique())\nunique_education = sorted(X_train[\"education\"].unique())\nunique_marital_status = sorted(X_train[\"marital.status\"].unique())\nunique_relationship = sorted(X_train[\"relationship\"].unique())\nunique_occupation = sorted(X_train[\"occupation\"].unique())\nunique_sex = sorted(X_train[\"sex\"].unique())\nunique_country = sorted(X_train[\"native.country\"].unique())\n\nwith gr.Blocks() as demo:\n gr.Markdown(\"\"\"\n **Income Classification with XGBoost \ud83d\udcb0**: This demo uses an XGBoost classifier predicts income based on demographic factors, along with Shapley value-based *explanations*. 
The [source code for this Gradio demo is here](https://huggingface.co/spaces/gradio/xgboost-income-prediction-with-explainability/blob/main/app.py).\n \"\"\")\n with gr.Row():\n with gr.Column():\n age = gr.Slider(label=\"Age\", minimum=17, maximum=90, step=1, randomize=True)\n work_class = gr.Dropdown(\n label=\"Workclass\",\n choices=unique_class,\n value=lambda: random.choice(unique_class),\n )\n education = gr.Dropdown(\n label=\"Education Level\",\n choices=unique_education,\n value=lambda: random.choice(unique_education),\n )\n years = gr.Slider(\n label=\"Years of schooling\",\n minimum=1,\n maximum=16,\n step=1,\n randomize=True,\n )\n marital_status = gr.Dropdown(\n label=\"Marital Status\",\n choices=unique_marital_status,\n value=lambda: random.choice(unique_marital_status),\n )\n occupation = gr.Dropdown(\n label=\"Occupation\",\n choices=unique_occupation,\n value=lambda: random.choice(unique_occupation),\n )\n relationship = gr.Dropdown(\n label=\"Relationship Status\",\n choices=unique_relationship,\n value=lambda: random.choice(unique_relationship),\n )\n sex = gr.Dropdown(\n label=\"Sex\", choices=unique_sex, value=lambda: random.choice(unique_sex)\n )\n capital_gain = gr.Slider(\n label=\"Capital Gain\",\n minimum=0,\n maximum=100000,\n step=500,\n randomize=True,\n )\n capital_loss = gr.Slider(\n label=\"Capital Loss\", minimum=0, maximum=10000, step=500, randomize=True\n )\n hours_per_week = gr.Slider(\n label=\"Hours Per Week Worked\", minimum=1, maximum=99, step=1\n )\n country = gr.Dropdown(\n label=\"Native Country\",\n choices=unique_country,\n value=lambda: random.choice(unique_country),\n )\n with gr.Column():\n label = gr.Label()\n plot = gr.Plot()\n with gr.Row():\n predict_btn = gr.Button(value=\"Predict\")\n interpret_btn = gr.Button(value=\"Explain\")\n predict_btn.click(\n predict,\n inputs=[\n age,\n work_class,\n education,\n years,\n marital_status,\n occupation,\n relationship,\n sex,\n capital_gain,\n capital_loss,\n hours_per_week,\n country,\n ],\n outputs=[label],\n )\n interpret_btn.click(\n interpret,\n inputs=[\n age,\n work_class,\n education,\n years,\n marital_status,\n occupation,\n relationship,\n sex,\n capital_gain,\n capital_loss,\n hours_per_week,\n country,\n ],\n outputs=[plot],\n )\n\ndemo.launch()\n", - "text": "This demo takes in 12 inputs from the user in dropdowns and sliders and predicts income. It also has a separate button for explaining the prediction." - }, - { - "name": "Leaderboard", - "dir": "leaderboard", - "code": "import gradio as gr\nimport requests\nimport pandas as pd\nfrom huggingface_hub.hf_api import SpaceInfo\npath = f\"https://huggingface.co/api/spaces\"\n\n\ndef get_blocks_party_spaces():\n r = requests.get(path)\n d = r.json()\n spaces = [SpaceInfo(**x) for x in d]\n blocks_spaces = {}\n for i in range(0,len(spaces)):\n if spaces[i].id.split('/')[0] == 'Gradio-Blocks' and hasattr(spaces[i], 'likes') and spaces[i].id != 'Gradio-Blocks/Leaderboard' and spaces[i].id != 'Gradio-Blocks/README':\n blocks_spaces[spaces[i].id]=spaces[i].likes\n df = pd.DataFrame(\n [{\"Spaces_Name\": Spaces, \"likes\": likes} for Spaces,likes in blocks_spaces.items()])\n df = df.sort_values(by=['likes'],ascending=False)\n return df\n\nblock = gr.Blocks()\n\nwith block: \n gr.Markdown(\"\"\"Leaderboard for the most popular Blocks Event Spaces. 
To learn more and join, see Blocks Party Event\"\"\")\n with gr.Tabs():\n with gr.TabItem(\"Blocks Party Leaderboard\"):\n with gr.Row():\n data = gr.outputs.Dataframe(type=\"pandas\")\n with gr.Row():\n data_run = gr.Button(\"Refresh\")\n data_run.click(get_blocks_party_spaces, inputs=None, outputs=data)\n # running the function on page load in addition to when the button is clicked\n block.load(get_blocks_party_spaces, inputs=None, outputs=data) \n\nblock.launch()\n\n", - "text": "A simple dashboard ranking spaces by number of likes." - }, - { - "name": "Tax Calculator", - "dir": "tax_calculator", - "code": "import gradio as gr\n\ndef tax_calculator(income, marital_status, assets):\n tax_brackets = [(10, 0), (25, 8), (60, 12), (120, 20), (250, 30)]\n total_deductible = sum(assets[\"Cost\"])\n taxable_income = income - total_deductible\n\n total_tax = 0\n for bracket, rate in tax_brackets:\n if taxable_income > bracket:\n total_tax += (taxable_income - bracket) * rate / 100\n\n if marital_status == \"Married\":\n total_tax *= 0.75\n elif marital_status == \"Divorced\":\n total_tax *= 0.8\n\n return round(total_tax)\n\ndemo = gr.Interface(\n tax_calculator,\n [\n \"number\",\n gr.Radio([\"Single\", \"Married\", \"Divorced\"]),\n gr.Dataframe(\n headers=[\"Item\", \"Cost\"],\n datatype=[\"str\", \"number\"],\n label=\"Assets Purchased this Year\",\n ),\n ],\n \"number\",\n examples=[\n [10000, \"Married\", [[\"Suit\", 5000], [\"Laptop\", 800], [\"Car\", 1800]]],\n [80000, \"Single\", [[\"Suit\", 800], [\"Watch\", 1800], [\"Car\", 800]]],\n ],\n)\n\ndemo.launch()\n", - "text": "Calculate taxes using Textbox, Radio, and Dataframe components" - } - ] - }, - { - "category": "\ud83c\udfa4 Audio & Speech", - "demos": [ - { - "name": "Text to Speech", - "dir": "neon-tts-plugin-coqui", - "code": "import tempfile\nimport gradio as gr\nfrom neon_tts_plugin_coqui import CoquiTTS\n\nLANGUAGES = list(CoquiTTS.langs.keys())\ncoquiTTS = CoquiTTS()\n\ndef tts(text: str, language: str):\n with tempfile.NamedTemporaryFile(suffix=\".wav\", delete=False) as fp:\n coquiTTS.get_tts(text, fp, speaker = {\"language\" : language})\n return fp.name\n\ninputs = [gr.Textbox(label=\"Input\", value=CoquiTTS.langs[\"en\"][\"sentence\"], max_lines=3), \n gr.Radio(label=\"Language\", choices=LANGUAGES, value=\"en\")]\noutputs = gr.Audio(label=\"Output\")\n\ndemo = gr.Interface(fn=tts, inputs=inputs, outputs=outputs)\n\ndemo.launch()", - "text": "This demo converts text to speech in 14 languages." - }, - { - "name": "Speech to Text (ASR)", - "dir": "automatic-speech-recognition", - "code": "import gradio as gr\nimport os\n\n# save your HF API token from https:/hf.co/settings/tokens as an env variable to avoid rate limiting\nauth_token = os.getenv(\"auth_token\")\n\n# automatically load the interface from a HF model \n# you can remove the api_key parameter if you don't care about rate limiting. \ndemo = gr.load(\n \"huggingface/facebook/wav2vec2-base-960h\",\n title=\"Speech-to-text\",\n inputs=\"mic\",\n description=\"Let me try to guess what you're saying!\",\n hf_token=auth_token\n)\n\ndemo.launch()\n", - "text": "Automatic speech recognition English. Record from your microphone and the app will transcribe the audio." 
- }, - { - "name": "Musical Instrument Identification", - "dir": "musical_instrument_identification", - "code": "import gradio as gr\nimport torch\nimport torchaudio\nfrom timeit import default_timer as timer\nfrom data_setups import audio_preprocess, resample\nimport gdown\n\nurl = 'https://drive.google.com/uc?id=1X5CR18u0I-ZOi_8P0cNptCe5JGk9Ro0C'\noutput = 'piano.wav'\ngdown.download(url, output, quiet=False)\nurl = 'https://drive.google.com/uc?id=1W-8HwmGR5SiyDbUcGAZYYDKdCIst07__'\noutput= 'torch_efficientnet_fold2_CNN.pth'\ngdown.download(url, output, quiet=False)\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nSAMPLE_RATE = 44100\nAUDIO_LEN = 2.90\nmodel = torch.load(\"torch_efficientnet_fold2_CNN.pth\", map_location=torch.device('cpu'))\nLABELS = [\n \"Cello\", \"Clarinet\", \"Flute\", \"Acoustic Guitar\", \"Electric Guitar\", \"Organ\", \"Piano\", \"Saxophone\", \"Trumpet\", \"Violin\", \"Voice\"\n]\nexample_list = [\n [\"piano.wav\"]\n]\n\n\ndef predict(audio_path):\n start_time = timer()\n wavform, sample_rate = torchaudio.load(audio_path)\n wav = resample(wavform, sample_rate, SAMPLE_RATE)\n if len(wav) > int(AUDIO_LEN * SAMPLE_RATE):\n wav = wav[:int(AUDIO_LEN * SAMPLE_RATE)]\n else:\n print(f\"input length {len(wav)} too small!, need over {int(AUDIO_LEN * SAMPLE_RATE)}\")\n return\n img = audio_preprocess(wav, SAMPLE_RATE).unsqueeze(0)\n model.eval()\n with torch.inference_mode():\n pred_probs = torch.softmax(model(img), dim=1)\n pred_labels_and_probs = {LABELS[i]: float(pred_probs[0][i]) for i in range(len(LABELS))}\n pred_time = round(timer() - start_time, 5)\n return pred_labels_and_probs, pred_time\n\ndemo = gr.Interface(fn=predict,\n inputs=gr.Audio(type=\"filepath\"),\n outputs=[gr.Label(num_top_classes=11, label=\"Predictions\"), \n gr.Number(label=\"Prediction time (s)\")],\n examples=example_list,\n cache_examples=False\n )\n\ndemo.launch(debug=False)\n", - "text": "This demo identifies musical instruments from an audio file. It uses Gradio's Audio and Label components." - }, - { - "name": "Speaker Verification", - "dir": "same-person-or-different", - "code": "import gradio as gr\nimport torch\nfrom torchaudio.sox_effects import apply_effects_file\nfrom transformers import AutoFeatureExtractor, AutoModelForAudioXVector\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nOUTPUT_OK = (\n \"\"\"\n
The speakers are {:.1f}% similar. Welcome, human! (You must get at least 85% to be considered the same person)
\n\"\"\"\n)\nOUTPUT_FAIL = (\n \"\"\"\n
The speakers are {:.1f}% similar. You shall not pass! (You must get at least 85% to be considered the same person)
\n\"\"\"\n)\n\nEFFECTS = [\n [\"remix\", \"-\"],\n [\"channels\", \"1\"],\n [\"rate\", \"16000\"],\n [\"gain\", \"-1.0\"],\n [\"silence\", \"1\", \"0.1\", \"0.1%\", \"-1\", \"0.1\", \"0.1%\"],\n [\"trim\", \"0\", \"10\"],\n]\n\nTHRESHOLD = 0.85\n\nmodel_name = \"microsoft/unispeech-sat-base-plus-sv\"\nfeature_extractor = AutoFeatureExtractor.from_pretrained(model_name)\nmodel = AutoModelForAudioXVector.from_pretrained(model_name).to(device)\ncosine_sim = torch.nn.CosineSimilarity(dim=-1)\n\n\ndef similarity_fn(path1, path2):\n if not (path1 and path2):\n return 'ERROR: Please record audio for *both* speakers!'\n\n wav1, _ = apply_effects_file(path1, EFFECTS)\n wav2, _ = apply_effects_file(path2, EFFECTS)\n print(wav1.shape, wav2.shape)\n\n input1 = feature_extractor(wav1.squeeze(0), return_tensors=\"pt\", sampling_rate=16000).input_values.to(device)\n input2 = feature_extractor(wav2.squeeze(0), return_tensors=\"pt\", sampling_rate=16000).input_values.to(device)\n\n with torch.no_grad():\n emb1 = model(input1).embeddings\n emb2 = model(input2).embeddings\n emb1 = torch.nn.functional.normalize(emb1, dim=-1).cpu()\n emb2 = torch.nn.functional.normalize(emb2, dim=-1).cpu()\n similarity = cosine_sim(emb1, emb2).numpy()[0]\n\n if similarity >= THRESHOLD:\n output = OUTPUT_OK.format(similarity * 100)\n else:\n output = OUTPUT_FAIL.format(similarity * 100)\n\n return output\n\ninputs = [\n gr.inputs.Audio(source=\"microphone\", type=\"filepath\", optional=True, label=\"Speaker #1\"),\n gr.inputs.Audio(source=\"microphone\", type=\"filepath\", optional=True, label=\"Speaker #2\"),\n]\noutput = gr.outputs.HTML(label=\"\")\n\n\ndescription = (\n \"This demo from Microsoft will compare two speech samples and determine if they are from the same speaker. \"\n \"Try it with your own voice!\"\n)\narticle = (\n \"

\"\n \"\ud83c\udf99\ufe0f Learn more about UniSpeech-SAT | \"\n \"\ud83d\udcda UniSpeech-SAT paper | \"\n \"\ud83d\udcda X-Vector paper\"\n \"

\"\n)\nexamples = [\n [\"samples/cate_blanch.mp3\", \"samples/cate_blanch_2.mp3\"],\n [\"samples/cate_blanch.mp3\", \"samples/heath_ledger.mp3\"],\n]\n\ninterface = gr.Interface(\n fn=similarity_fn,\n inputs=inputs,\n outputs=output,\n layout=\"horizontal\",\n allow_flagging=False,\n live=False,\n examples=examples,\n cache_examples=False\n)\ninterface.launch()\n", - "text": "This demo identifies if two speakers are the same person using Gradio's Audio and HTML components." - } - ] - } -] +[{"category": "\ud83d\udd8a\ufe0f Text & Natural Language Processing", "demos": [{"name": "Hello World", "dir": "hello_world", "code": "import gradio as gr\n\ndef greet(name):\n return \"Hello \" + name + \"!\"\n\ndemo = gr.Interface(fn=greet, inputs=\"text\", outputs=\"text\")\n \nif __name__ == \"__main__\":\n demo.launch() ", "text": "The simplest possible Gradio demo. It wraps a 'Hello {name}!' function in an Interface that accepts and returns text."}, {"name": "Text Generation", "dir": "text_generation", "code": "import gradio as gr\nfrom transformers import pipeline\n\ngenerator = pipeline('text-generation', model='gpt2')\n\ndef generate(text):\n result = generator(text, max_length=30, num_return_sequences=1)\n return result[0][\"generated_text\"]\n\nexamples = [\n [\"The Moon's orbit around Earth has\"],\n [\"The smooth Borealis basin in the Northern Hemisphere covers 40%\"],\n]\n\ndemo = gr.Interface(\n fn=generate,\n inputs=gr.inputs.Textbox(lines=5, label=\"Input Text\"),\n outputs=gr.outputs.Textbox(label=\"Generated Text\"),\n examples=examples\n)\n\ndemo.launch()\n", "text": "This text generation demo takes in input text and returns generated text. It uses the Transformers library to set up the model and has two examples."}, {"name": "Autocomplete", "dir": "autocomplete", "code": "import gradio as gr\nimport os\n\n# save your HF API token from https:/hf.co/settings/tokens as an env variable to avoid rate limiting\nauth_token = os.getenv(\"auth_token\")\n\n# load a model from https://hf.co/models as an interface, then use it as an api \n# you can remove the api_key parameter if you don't care about rate limiting. \napi = gr.load(\"huggingface/gpt2-xl\", hf_token=auth_token)\n\ndef complete_with_gpt(text):\n return text[:-50] + api(text[-50:])\n\nwith gr.Blocks() as demo:\n textbox = gr.Textbox(placeholder=\"Type here...\", lines=4)\n btn = gr.Button(\"Autocomplete\")\n \n # define what will run when the button is clicked, here the textbox is used as both an input and an output\n btn.click(fn=complete_with_gpt, inputs=textbox, outputs=textbox, queue=False)\n\ndemo.launch()", "text": "This text generation demo works like autocomplete. There's only one textbox and it's used for both the input and the output. The demo loads the model as an interface, and uses that interface as an API. It then uses blocks to create the UI. 
All of this is done in less than 10 lines of code."}, {"name": "Sentiment Analysis", "dir": "sentiment_analysis", "code": "import gradio as gr\nimport nltk\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\n\nnltk.download(\"vader_lexicon\")\nsid = SentimentIntensityAnalyzer()\n\ndef sentiment_analysis(text):\n scores = sid.polarity_scores(text)\n del scores[\"compound\"]\n return scores\n\ndemo = gr.Interface(\n fn=sentiment_analysis, \n inputs=gr.Textbox(placeholder=\"Enter a positive or negative sentence here...\"), \n outputs=\"label\", \n interpretation=\"default\",\n examples=[[\"This is wonderful!\"]])\n\ndemo.launch()", "text": "This sentiment analaysis demo takes in input text and returns its classification for either positive, negative or neutral using Gradio's Label output. It also uses the default interpretation method so users can click the Interpret button after a submission and see which words had the biggest effect on the output."}, {"name": "Named Entity Recognition", "dir": "text_analysis", "code": "import gradio as gr\nimport os\nos.system('python -m spacy download en_core_web_sm')\nimport spacy\nfrom spacy import displacy\n\nnlp = spacy.load(\"en_core_web_sm\")\n\ndef text_analysis(text):\n doc = nlp(text)\n html = displacy.render(doc, style=\"dep\", page=True)\n html = (\n \"
\"\n + html\n + \"
\"\n )\n pos_count = {\n \"char_count\": len(text),\n \"token_count\": 0,\n }\n pos_tokens = []\n\n for token in doc:\n pos_tokens.extend([(token.text, token.pos_), (\" \", None)])\n\n return pos_tokens, pos_count, html\n\ndemo = gr.Interface(\n text_analysis,\n gr.Textbox(placeholder=\"Enter sentence here...\"),\n [\"highlight\", \"json\", \"html\"],\n examples=[\n [\"What a beautiful morning for a walk!\"],\n [\"It was the best of times, it was the worst of times.\"],\n ],\n)\n\ndemo.launch()\n", "text": "This simple demo takes advantage of Gradio's HighlightedText, JSON and HTML outputs to create a clear NER segmentation."}, {"name": "Multilingual Translation", "dir": "translation", "code": "import gradio as gr\nfrom transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline\nimport torch\n\n# this model was loaded from https://hf.co/models\nmodel = AutoModelForSeq2SeqLM.from_pretrained(\"facebook/nllb-200-distilled-600M\")\ntokenizer = AutoTokenizer.from_pretrained(\"facebook/nllb-200-distilled-600M\")\ndevice = 0 if torch.cuda.is_available() else -1\nLANGS = [\"ace_Arab\", \"eng_Latn\", \"fra_Latn\", \"spa_Latn\"]\n\ndef translate(text, src_lang, tgt_lang):\n \"\"\"\n Translate the text from source lang to target lang\n \"\"\"\n translation_pipeline = pipeline(\"translation\", model=model, tokenizer=tokenizer, src_lang=src_lang, tgt_lang=tgt_lang, max_length=400, device=device)\n result = translation_pipeline(text)\n return result[0]['translation_text']\n\ndemo = gr.Interface(\n fn=translate,\n inputs=[\n gr.components.Textbox(label=\"Text\"),\n gr.components.Dropdown(label=\"Source Language\", choices=LANGS),\n gr.components.Dropdown(label=\"Target Language\", choices=LANGS),\n ],\n outputs=[\"text\"],\n examples=[[\"Building a translation demo with Gradio is so easy!\", \"eng_Latn\", \"spa_Latn\"]],\n cache_examples=False,\n title=\"Translation Demo\",\n description=\"This demo is a simplified version of the original [NLLB-Translator](https://huggingface.co/spaces/Narrativaai/NLLB-Translator) space\"\n)\n\ndemo.launch()", "text": "This translation demo takes in the text, source and target languages, and returns the translation. 
It uses the Transformers library to set up the model and has a title, description, and example."}]}, {"category": "\ud83d\uddbc\ufe0f Images & Computer Vision", "demos": [{"name": "Image Classification", "dir": "image_classification", "code": "import gradio as gr\nimport torch\nimport requests\nfrom torchvision import transforms\n\nmodel = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\ndef predict(inp):\n inp = transforms.ToTensor()(inp).unsqueeze(0)\n with torch.no_grad():\n prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)\n confidences = {labels[i]: float(prediction[i]) for i in range(1000)} \n return confidences\n\ndemo = gr.Interface(fn=predict, \n inputs=gr.inputs.Image(type=\"pil\"),\n outputs=gr.outputs.Label(num_top_classes=3),\n examples=[[\"cheetah.jpg\"]],\n )\n \ndemo.launch()", "text": "Simple image classification in Pytorch with Gradio's Image input and Label output."}, {"name": "Image Segmentation", "dir": "image_segmentation", "code": "import gradio as gr\nimport numpy as np\nimport random\n\nwith gr.Blocks() as demo:\n section_labels = [\n \"apple\",\n \"banana\",\n \"carrot\",\n \"donut\",\n \"eggplant\",\n \"fish\",\n \"grapes\",\n \"hamburger\",\n \"ice cream\",\n \"juice\",\n ]\n\n with gr.Row():\n num_boxes = gr.Slider(0, 5, 2, step=1, label=\"Number of boxes\")\n num_segments = gr.Slider(0, 5, 1, step=1, label=\"Number of segments\")\n\n with gr.Row():\n img_input = gr.Image()\n img_output = gr.AnnotatedImage().style(\n color_map={\"banana\": \"#a89a00\", \"carrot\": \"#ffae00\"}\n )\n\n section_btn = gr.Button(\"Identify Sections\")\n selected_section = gr.Textbox(label=\"Selected Section\")\n\n def section(img, num_boxes, num_segments):\n sections = []\n for a in range(num_boxes):\n x = random.randint(0, img.shape[1])\n y = random.randint(0, img.shape[0])\n w = random.randint(0, img.shape[1] - x)\n h = random.randint(0, img.shape[0] - y)\n sections.append(((x, y, x + w, y + h), section_labels[a]))\n for b in range(num_segments):\n x = random.randint(0, img.shape[1])\n y = random.randint(0, img.shape[0])\n r = random.randint(0, min(x, y, img.shape[1] - x, img.shape[0] - y))\n mask = np.zeros(img.shape[:2])\n for i in range(img.shape[0]):\n for j in range(img.shape[1]):\n dist_square = (i - y) ** 2 + (j - x) ** 2\n if dist_square < r**2:\n mask[i, j] = round((r**2 - dist_square) / r**2 * 4) / 4\n sections.append((mask, section_labels[b + num_boxes]))\n return (img, sections)\n\n section_btn.click(section, [img_input, num_boxes, num_segments], img_output)\n\n def select_section(evt: gr.SelectData):\n return section_labels[evt.index]\n\n img_output.select(select_section, None, selected_section)\n\nif __name__ == \"__main__\":\n demo.launch()\n", "text": "Simple image segmentation using gradio's AnnotatedImage component."}, {"name": "Image Transformation with AnimeGAN", "dir": "animeganv2", "code": "import gradio as gr\nimport torch\n\nmodel2 = torch.hub.load(\n \"AK391/animegan2-pytorch:main\",\n \"generator\",\n pretrained=True,\n progress=False\n)\nmodel1 = torch.hub.load(\"AK391/animegan2-pytorch:main\", \"generator\", pretrained=\"face_paint_512_v1\")\nface2paint = torch.hub.load(\n 'AK391/animegan2-pytorch:main', 'face2paint', \n size=512,side_by_side=False\n)\n\ndef inference(img, ver):\n if ver == 'version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)':\n out = face2paint(model2, img)\n else:\n out = face2paint(model1, 
img)\n return out\n\ntitle = \"AnimeGANv2\"\ndescription = \"Gradio Demo for AnimeGanv2 Face Portrait. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Please use a cropped portrait picture for best results similar to the examples below.\"\narticle = \"

Github Repo Pytorch | visitor badge
\"\nexamples=[['groot.jpeg','version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)'],['gongyoo.jpeg','version 1 (\ud83d\udd3a stylization, \ud83d\udd3b robustness)']]\n\ndemo = gr.Interface(\n fn=inference, \n inputs=[gr.inputs.Image(type=\"pil\"),gr.inputs.Radio(['version 1 (\ud83d\udd3a stylization, \ud83d\udd3b robustness)','version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)'], type=\"value\", default='version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)', label='version')], \n outputs=gr.outputs.Image(type=\"pil\"),\n title=title,\n description=description,\n article=article,\n examples=examples)\n\ndemo.launch()", "text": "Recreate the viral AnimeGAN image transformation demo."}, {"name": "Image Generation (Fake GAN)", "dir": "fake_gan", "code": "# This demo needs to be run from the repo folder.\n# python demo/fake_gan/run.py\nimport random\n\nimport gradio as gr\n\n\ndef fake_gan():\n images = [\n (random.choice(\n [\n \"https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n \"https://images.unsplash.com/photo-1554151228-14d9def656e4?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=386&q=80\",\n \"https://images.unsplash.com/photo-1542909168-82c3e7fdca5c?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxzZWFyY2h8MXx8aHVtYW4lMjBmYWNlfGVufDB8fDB8fA%3D%3D&w=1000&q=80\",\n \"https://images.unsplash.com/photo-1546456073-92b9f0a8d413?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n \"https://images.unsplash.com/photo-1601412436009-d964bd02edbc?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=464&q=80\",\n ]\n ), f\"label {i}\" if i != 0 else \"label\" * 50)\n for i in range(3)\n ]\n return images\n\n\nwith gr.Blocks() as demo:\n with gr.Column(variant=\"panel\"):\n with gr.Row(variant=\"compact\"):\n text = gr.Textbox(\n label=\"Enter your prompt\",\n show_label=False,\n max_lines=1,\n placeholder=\"Enter your prompt\",\n ).style(\n container=False,\n )\n btn = gr.Button(\"Generate image\").style(full_width=False)\n\n gallery = gr.Gallery(\n label=\"Generated images\", show_label=False, elem_id=\"gallery\"\n ).style(columns=[2], rows=[2], object_fit=\"contain\", height=\"auto\")\n\n btn.click(fake_gan, None, gallery)\n\nif __name__ == \"__main__\":\n demo.launch()\n", "text": "This is a fake GAN that shows how to create a text-to-image interface for image generation. Check out the Stable Diffusion demo for more: https://hf.co/spaces/stabilityai/stable-diffusion/"}, {"name": "Iterative Output", "dir": "fake_diffusion", "code": "import gradio as gr\nimport numpy as np\nimport time\n\n# define core fn, which returns a generator {steps} times before returning the image\ndef fake_diffusion(steps):\n for _ in range(steps):\n time.sleep(1)\n image = np.random.random((600, 600, 3))\n yield image\n image = \"https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg\"\n yield image\n\n\ndemo = gr.Interface(fake_diffusion, inputs=gr.Slider(1, 10, 3), outputs=\"image\")\n\n# define queue - required for generators\ndemo.queue()\n\ndemo.launch()\n", "text": "This demo uses a fake model to showcase iterative output. 
The Image output will update every time a generator is returned until the final image."}, {"name": "3D Models", "dir": "depth_estimation", "code": "import gradio as gr\nfrom transformers import DPTFeatureExtractor, DPTForDepthEstimation\nimport torch\nimport numpy as np\nfrom PIL import Image\nimport open3d as o3d\nfrom pathlib import Path\n\nfeature_extractor = DPTFeatureExtractor.from_pretrained(\"Intel/dpt-large\")\nmodel = DPTForDepthEstimation.from_pretrained(\"Intel/dpt-large\")\n\ndef process_image(image_path):\n image_path = Path(image_path)\n image_raw = Image.open(image_path)\n image = image_raw.resize(\n (800, int(800 * image_raw.size[1] / image_raw.size[0])),\n Image.Resampling.LANCZOS)\n\n # prepare image for the model\n encoding = feature_extractor(image, return_tensors=\"pt\")\n\n # forward pass\n with torch.no_grad():\n outputs = model(**encoding)\n predicted_depth = outputs.predicted_depth\n\n # interpolate to original size\n prediction = torch.nn.functional.interpolate(\n predicted_depth.unsqueeze(1),\n size=image.size[::-1],\n mode=\"bicubic\",\n align_corners=False,\n ).squeeze()\n output = prediction.cpu().numpy()\n depth_image = (output * 255 / np.max(output)).astype('uint8')\n try:\n gltf_path = create_3d_obj(np.array(image), depth_image, image_path)\n img = Image.fromarray(depth_image)\n return [img, gltf_path, gltf_path]\n except Exception:\n gltf_path = create_3d_obj(\n np.array(image), depth_image, image_path, depth=8)\n img = Image.fromarray(depth_image)\n return [img, gltf_path, gltf_path]\n except:\n print(\"Error reconstructing 3D model\")\n raise Exception(\"Error reconstructing 3D model\")\n\n\ndef create_3d_obj(rgb_image, depth_image, image_path, depth=10):\n depth_o3d = o3d.geometry.Image(depth_image)\n image_o3d = o3d.geometry.Image(rgb_image)\n rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(\n image_o3d, depth_o3d, convert_rgb_to_intensity=False)\n w = int(depth_image.shape[1])\n h = int(depth_image.shape[0])\n\n camera_intrinsic = o3d.camera.PinholeCameraIntrinsic()\n camera_intrinsic.set_intrinsics(w, h, 500, 500, w/2, h/2)\n\n pcd = o3d.geometry.PointCloud.create_from_rgbd_image(\n rgbd_image, camera_intrinsic)\n\n print('normals')\n pcd.normals = o3d.utility.Vector3dVector(\n np.zeros((1, 3))) # invalidate existing normals\n pcd.estimate_normals(\n search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.01, max_nn=30))\n pcd.orient_normals_towards_camera_location(\n camera_location=np.array([0., 0., 1000.]))\n pcd.transform([[1, 0, 0, 0],\n [0, -1, 0, 0],\n [0, 0, -1, 0],\n [0, 0, 0, 1]])\n pcd.transform([[-1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n\n print('run Poisson surface reconstruction')\n with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Debug):\n mesh_raw, densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(\n pcd, depth=depth, width=0, scale=1.1, linear_fit=True)\n\n voxel_size = max(mesh_raw.get_max_bound() - mesh_raw.get_min_bound()) / 256\n print(f'voxel_size = {voxel_size:e}')\n mesh = mesh_raw.simplify_vertex_clustering(\n voxel_size=voxel_size,\n contraction=o3d.geometry.SimplificationContraction.Average)\n\n # vertices_to_remove = densities < np.quantile(densities, 0.001)\n # mesh.remove_vertices_by_mask(vertices_to_remove)\n bbox = pcd.get_axis_aligned_bounding_box()\n mesh_crop = mesh.crop(bbox)\n gltf_path = f'./{image_path.stem}.gltf'\n o3d.io.write_triangle_mesh(\n gltf_path, mesh_crop, write_triangle_uvs=True)\n return gltf_path\n\ntitle = \"Demo: 
zero-shot depth estimation with DPT + 3D Point Cloud\"\ndescription = \"This demo is a variation from the original DPT Demo. It uses the DPT model to predict the depth of an image and then uses 3D Point Cloud to create a 3D object.\"\nexamples = [[\"examples/1-jonathan-borba-CgWTqYxHEkg-unsplash.jpg\"]]\n\niface = gr.Interface(fn=process_image,\n inputs=[gr.Image(\n type=\"filepath\", label=\"Input Image\")],\n outputs=[gr.Image(label=\"predicted depth\", type=\"pil\"),\n gr.Model3D(label=\"3d mesh reconstruction\", clear_color=[\n 1.0, 1.0, 1.0, 1.0]),\n gr.File(label=\"3d gLTF\")],\n title=title,\n description=description,\n examples=examples,\n allow_flagging=\"never\",\n cache_examples=False)\n\niface.launch(debug=True, enable_queue=False)", "text": "A demo for predicting the depth of an image and generating a 3D model of it."}]}, {"category": "\ud83d\udcc8 Tabular Data & Plots", "demos": [{"name": "Interactive Dashboard", "dir": "dashboard", "code": "import gradio as gr\nimport pandas as pd\nimport plotly.express as px\nfrom helpers import *\n\n\nLIBRARIES = [\"accelerate\", \"datasets\", \"diffusers\", \"evaluate\", \"gradio\", \"hub_docs\",\n \"huggingface_hub\", \"optimum\", \"pytorch_image_models\", \"tokenizers\", \"transformers\"]\n\n\ndef create_pip_plot(libraries, pip_choices):\n if \"Pip\" not in pip_choices:\n return gr.update(visible=False)\n output = retrieve_pip_installs(libraries, \"Cumulated\" in pip_choices)\n df = pd.DataFrame(output).melt(id_vars=\"day\")\n plot = px.line(df, x=\"day\", y=\"value\", color=\"variable\",\n title=\"Pip installs\")\n plot.update_layout(legend=dict(x=0.5, y=0.99), title_x=0.5, legend_title_text=\"\")\n return gr.update(value=plot, visible=True)\n\n\ndef create_star_plot(libraries, star_choices):\n if \"Stars\" not in star_choices:\n return gr.update(visible=False)\n output = retrieve_stars(libraries, \"Week over Week\" in star_choices)\n df = pd.DataFrame(output).melt(id_vars=\"day\")\n plot = px.line(df, x=\"day\", y=\"value\", color=\"variable\",\n title=\"Number of stargazers\")\n plot.update_layout(legend=dict(x=0.5, y=0.99), title_x=0.5, legend_title_text=\"\")\n return gr.update(value=plot, visible=True)\n\n\ndef create_issue_plot(libraries, issue_choices):\n if \"Issue\" not in issue_choices:\n return gr.update(visible=False)\n output = retrieve_issues(libraries,\n exclude_org_members=\"Exclude org members\" in issue_choices,\n week_over_week=\"Week over Week\" in issue_choices)\n df = pd.DataFrame(output).melt(id_vars=\"day\")\n plot = px.line(df, x=\"day\", y=\"value\", color=\"variable\",\n title=\"Cumulated number of issues, PRs, and comments\",\n )\n plot.update_layout(legend=dict(x=0.5, y=0.99), title_x=0.5, legend_title_text=\"\")\n return gr.update(value=plot, visible=True)\n\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n with gr.Box():\n gr.Markdown(\"## Select libraries to display\")\n libraries = gr.CheckboxGroup(choices=LIBRARIES, label=\"\")\n with gr.Column():\n with gr.Box():\n gr.Markdown(\"## Select graphs to display\")\n pip = gr.CheckboxGroup(choices=[\"Pip\", \"Cumulated\"], label=\"\")\n stars = gr.CheckboxGroup(choices=[\"Stars\", \"Week over Week\"], label=\"\")\n issues = gr.CheckboxGroup(choices=[\"Issue\", \"Exclude org members\", \"week over week\"], label=\"\")\n with gr.Row():\n fetch = gr.Button(value=\"Fetch\")\n with gr.Row():\n with gr.Column():\n pip_plot = gr.Plot(visible=False)\n star_plot = gr.Plot(visible=False)\n issue_plot = gr.Plot(visible=False)\n\n 
fetch.click(create_pip_plot, inputs=[libraries, pip], outputs=pip_plot)\n fetch.click(create_star_plot, inputs=[libraries, stars], outputs=star_plot)\n fetch.click(create_issue_plot, inputs=[libraries, issues], outputs=issue_plot)\n\n\nif __name__ == \"__main__\":\n demo.launch()", "text": "This demo shows how you can build an interactive dashboard with gradio. Click on a python library on the left hand side and then on the right hand side click on the metric you'd like to see plot over time. Data is pulled from HuggingFace Hub datasets."}, {"name": "Dashboard with Live Updates", "dir": "live_dashboard", "code": "import math\n\nimport pandas as pd\n\nimport gradio as gr\nimport datetime\nimport numpy as np\n\n\ndef get_time():\n return datetime.datetime.now()\n\n\nplot_end = 2 * math.pi\n\n\ndef get_plot(period=1):\n global plot_end\n x = np.arange(plot_end - 2 * math.pi, plot_end, 0.02)\n y = np.sin(2 * math.pi * period * x)\n update = gr.LinePlot.update(\n value=pd.DataFrame({\"x\": x, \"y\": y}),\n x=\"x\",\n y=\"y\",\n title=\"Plot (updates every second)\",\n width=600,\n height=350,\n )\n plot_end += 2 * math.pi\n if plot_end > 1000:\n plot_end = 2 * math.pi\n return update\n\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n c_time2 = gr.Textbox(label=\"Current Time refreshed every second\")\n gr.Textbox(\n \"Change the value of the slider to automatically update the plot\",\n label=\"\",\n )\n period = gr.Slider(\n label=\"Period of plot\", value=1, minimum=0, maximum=10, step=1\n )\n plot = gr.LinePlot(show_label=False)\n with gr.Column():\n name = gr.Textbox(label=\"Enter your name\")\n greeting = gr.Textbox(label=\"Greeting\")\n button = gr.Button(value=\"Greet\")\n button.click(lambda s: f\"Hello {s}\", name, greeting)\n\n demo.load(lambda: datetime.datetime.now(), None, c_time2, every=1)\n dep = demo.load(get_plot, None, plot, every=1)\n period.change(get_plot, period, plot, every=1, cancels=[dep])\n\nif __name__ == \"__main__\":\n demo.queue().launch()\n", "text": "This demo shows how you can build a live interactive dashboard with gradio.\nThe current time is refreshed every second and the plot every half second by using the 'every' keyword in the event handler.\nChanging the value of the slider will control the period of the sine curve (the distance between peaks). "}, {"name": "Interactive Map of AirBnB Locations", "dir": "map_airbnb", "code": "import gradio as gr\nimport plotly.graph_objects as go\nfrom datasets import load_dataset\n\ndataset = load_dataset(\"gradio/NYC-Airbnb-Open-Data\", split=\"train\")\ndf = dataset.to_pandas()\n\ndef filter_map(min_price, max_price, boroughs):\n\n filtered_df = df[(df['neighbourhood_group'].isin(boroughs)) & \n (df['price'] > min_price) & (df['price'] < max_price)]\n names = filtered_df[\"name\"].tolist()\n prices = filtered_df[\"price\"].tolist()\n text_list = [(names[i], prices[i]) for i in range(0, len(names))]\n fig = go.Figure(go.Scattermapbox(\n customdata=text_list,\n lat=filtered_df['latitude'].tolist(),\n lon=filtered_df['longitude'].tolist(),\n mode='markers',\n marker=go.scattermapbox.Marker(\n size=6\n ),\n hoverinfo=\"text\",\n hovertemplate='Name: %{customdata[0]}
Price: $%{customdata[1]}'\n ))\n\n fig.update_layout(\n mapbox_style=\"open-street-map\",\n hovermode='closest',\n mapbox=dict(\n bearing=0,\n center=go.layout.mapbox.Center(\n lat=40.67,\n lon=-73.90\n ),\n pitch=0,\n zoom=9\n ),\n )\n\n return fig\n\nwith gr.Blocks() as demo:\n with gr.Column():\n with gr.Row():\n min_price = gr.Number(value=250, label=\"Minimum Price\")\n max_price = gr.Number(value=1000, label=\"Maximum Price\")\n boroughs = gr.CheckboxGroup(choices=[\"Queens\", \"Brooklyn\", \"Manhattan\", \"Bronx\", \"Staten Island\"], value=[\"Queens\", \"Brooklyn\"], label=\"Select Boroughs:\")\n btn = gr.Button(value=\"Update Filter\")\n map = gr.Plot().style()\n demo.load(filter_map, [min_price, max_price, boroughs], map)\n btn.click(filter_map, [min_price, max_price, boroughs], map)\n\nif __name__ == \"__main__\":\n demo.launch()", "text": "Display an interactive map of AirBnB locations with Plotly. Data is hosted on HuggingFace Datasets. "}, {"name": "Outbreak Forecast", "dir": "outbreak_forecast", "code": "import altair\n\nimport gradio as gr\nfrom math import sqrt\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport plotly.express as px\nimport pandas as pd\n\n\ndef outbreak(plot_type, r, month, countries, social_distancing):\n months = [\"January\", \"February\", \"March\", \"April\", \"May\"]\n m = months.index(month)\n start_day = 30 * m\n final_day = 30 * (m + 1)\n x = np.arange(start_day, final_day + 1)\n pop_count = {\"USA\": 350, \"Canada\": 40, \"Mexico\": 300, \"UK\": 120}\n if social_distancing:\n r = sqrt(r)\n df = pd.DataFrame({\"day\": x})\n for country in countries:\n df[country] = x ** (r) * (pop_count[country] + 1)\n\n if plot_type == \"Matplotlib\":\n fig = plt.figure()\n plt.plot(df[\"day\"], df[countries].to_numpy())\n plt.title(\"Outbreak in \" + month)\n plt.ylabel(\"Cases\")\n plt.xlabel(\"Days since Day 0\")\n plt.legend(countries)\n return fig\n elif plot_type == \"Plotly\":\n fig = px.line(df, x=\"day\", y=countries)\n fig.update_layout(\n title=\"Outbreak in \" + month,\n xaxis_title=\"Cases\",\n yaxis_title=\"Days Since Day 0\",\n )\n return fig\n elif plot_type == \"Altair\":\n df = df.melt(id_vars=\"day\").rename(columns={\"variable\": \"country\"})\n fig = altair.Chart(df).mark_line().encode(x=\"day\", y='value', color='country')\n return fig\n else:\n raise ValueError(\"A plot type must be selected\")\n\n\ninputs = [\n gr.Dropdown([\"Matplotlib\", \"Plotly\", \"Altair\"], label=\"Plot Type\"),\n gr.Slider(1, 4, 3.2, label=\"R\"),\n gr.Dropdown([\"January\", \"February\", \"March\", \"April\", \"May\"], label=\"Month\"),\n gr.CheckboxGroup(\n [\"USA\", \"Canada\", \"Mexico\", \"UK\"], label=\"Countries\", value=[\"USA\", \"Canada\"]\n ),\n gr.Checkbox(label=\"Social Distancing?\"),\n]\noutputs = gr.Plot()\n\ndemo = gr.Interface(\n fn=outbreak,\n inputs=inputs,\n outputs=outputs,\n examples=[\n [\"Matplotlib\", 2, \"March\", [\"Mexico\", \"UK\"], True],\n [\"Altair\", 2, \"March\", [\"Mexico\", \"Canada\"], True],\n [\"Plotly\", 3.6, \"February\", [\"Canada\", \"Mexico\", \"UK\"], False],\n ],\n cache_examples=True,\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n\n\n\n", "text": "Generate a plot based on 5 inputs."}, {"name": "Clustering with Scikit-Learn", "dir": "clustering", "code": "import gradio as gr\nimport math\nfrom functools import partial\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.cluster import (\n AgglomerativeClustering, Birch, DBSCAN, KMeans, MeanShift, OPTICS, SpectralClustering, 
estimate_bandwidth\n)\nfrom sklearn.datasets import make_blobs, make_circles, make_moons\nfrom sklearn.mixture import GaussianMixture\nfrom sklearn.neighbors import kneighbors_graph\nfrom sklearn.preprocessing import StandardScaler\n\nplt.style.use('seaborn')\nSEED = 0\nMAX_CLUSTERS = 10\nN_SAMPLES = 1000\nN_COLS = 3\nFIGSIZE = 7, 7 # does not affect size in webpage\nCOLORS = [\n 'blue', 'orange', 'green', 'red', 'purple', 'brown', 'pink', 'gray', 'olive', 'cyan'\n]\nassert len(COLORS) >= MAX_CLUSTERS, \"Not enough different colors for all clusters\"\nnp.random.seed(SEED)\n\n\ndef normalize(X):\n return StandardScaler().fit_transform(X)\n\ndef get_regular(n_clusters):\n # spiral pattern\n centers = [\n [0, 0],\n [1, 0],\n [1, 1],\n [0, 1],\n [-1, 1],\n [-1, 0],\n [-1, -1],\n [0, -1],\n [1, -1],\n [2, -1],\n ][:n_clusters]\n assert len(centers) == n_clusters\n X, labels = make_blobs(n_samples=N_SAMPLES, centers=centers, cluster_std=0.25, random_state=SEED)\n return normalize(X), labels\n\n\ndef get_circles(n_clusters):\n X, labels = make_circles(n_samples=N_SAMPLES, factor=0.5, noise=0.05, random_state=SEED)\n return normalize(X), labels\n\n\ndef get_moons(n_clusters):\n X, labels = make_moons(n_samples=N_SAMPLES, noise=0.05, random_state=SEED)\n return normalize(X), labels\n\n\ndef get_noise(n_clusters):\n np.random.seed(SEED)\n X, labels = np.random.rand(N_SAMPLES, 2), np.random.randint(0, n_clusters, size=(N_SAMPLES,))\n return normalize(X), labels\n\n\ndef get_anisotropic(n_clusters):\n X, labels = make_blobs(n_samples=N_SAMPLES, centers=n_clusters, random_state=170)\n transformation = [[0.6, -0.6], [-0.4, 0.8]]\n X = np.dot(X, transformation)\n return X, labels\n\n\ndef get_varied(n_clusters):\n cluster_std = [1.0, 2.5, 0.5, 1.0, 2.5, 0.5, 1.0, 2.5, 0.5, 1.0][:n_clusters]\n assert len(cluster_std) == n_clusters\n X, labels = make_blobs(\n n_samples=N_SAMPLES, centers=n_clusters, cluster_std=cluster_std, random_state=SEED\n )\n return normalize(X), labels\n\n\ndef get_spiral(n_clusters):\n # from https://scikit-learn.org/stable/auto_examples/cluster/plot_agglomerative_clustering.html\n np.random.seed(SEED)\n t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, N_SAMPLES))\n x = t * np.cos(t)\n y = t * np.sin(t)\n X = np.concatenate((x, y))\n X += 0.7 * np.random.randn(2, N_SAMPLES)\n X = np.ascontiguousarray(X.T)\n\n labels = np.zeros(N_SAMPLES, dtype=int)\n return normalize(X), labels\n\n\nDATA_MAPPING = {\n 'regular': get_regular,\n 'circles': get_circles,\n 'moons': get_moons,\n 'spiral': get_spiral,\n 'noise': get_noise,\n 'anisotropic': get_anisotropic,\n 'varied': get_varied,\n}\n\n\ndef get_groundtruth_model(X, labels, n_clusters, **kwargs):\n # dummy model to show true label distribution\n class Dummy:\n def __init__(self, y):\n self.labels_ = labels\n\n return Dummy(labels)\n\n\ndef get_kmeans(X, labels, n_clusters, **kwargs):\n model = KMeans(init=\"k-means++\", n_clusters=n_clusters, n_init=10, random_state=SEED)\n model.set_params(**kwargs)\n return model.fit(X)\n\n\ndef get_dbscan(X, labels, n_clusters, **kwargs):\n model = DBSCAN(eps=0.3)\n model.set_params(**kwargs)\n return model.fit(X)\n\n\ndef get_agglomerative(X, labels, n_clusters, **kwargs):\n connectivity = kneighbors_graph(\n X, n_neighbors=n_clusters, include_self=False\n )\n # make connectivity symmetric\n connectivity = 0.5 * (connectivity + connectivity.T)\n model = AgglomerativeClustering(\n n_clusters=n_clusters, linkage=\"ward\", connectivity=connectivity\n )\n model.set_params(**kwargs)\n return 
model.fit(X)\n\n\ndef get_meanshift(X, labels, n_clusters, **kwargs):\n bandwidth = estimate_bandwidth(X, quantile=0.25)\n model = MeanShift(bandwidth=bandwidth, bin_seeding=True)\n model.set_params(**kwargs)\n return model.fit(X)\n\n\ndef get_spectral(X, labels, n_clusters, **kwargs):\n model = SpectralClustering(\n n_clusters=n_clusters,\n eigen_solver=\"arpack\",\n affinity=\"nearest_neighbors\",\n )\n model.set_params(**kwargs)\n return model.fit(X)\n\n\ndef get_optics(X, labels, n_clusters, **kwargs):\n model = OPTICS(\n min_samples=7,\n xi=0.05,\n min_cluster_size=0.1,\n )\n model.set_params(**kwargs)\n return model.fit(X)\n\n\ndef get_birch(X, labels, n_clusters, **kwargs):\n model = Birch(n_clusters=n_clusters)\n model.set_params(**kwargs)\n return model.fit(X)\n\n\ndef get_gaussianmixture(X, labels, n_clusters, **kwargs):\n model = GaussianMixture(\n n_components=n_clusters, covariance_type=\"full\", random_state=SEED,\n )\n model.set_params(**kwargs)\n return model.fit(X)\n\n\nMODEL_MAPPING = {\n 'True labels': get_groundtruth_model,\n 'KMeans': get_kmeans,\n 'DBSCAN': get_dbscan,\n 'MeanShift': get_meanshift,\n 'SpectralClustering': get_spectral,\n 'OPTICS': get_optics,\n 'Birch': get_birch,\n 'GaussianMixture': get_gaussianmixture,\n 'AgglomerativeClustering': get_agglomerative,\n}\n\n\ndef plot_clusters(ax, X, labels):\n set_clusters = set(labels)\n set_clusters.discard(-1) # -1 signifiies outliers, which we plot separately\n for label, color in zip(sorted(set_clusters), COLORS):\n idx = labels == label\n if not sum(idx):\n continue\n ax.scatter(X[idx, 0], X[idx, 1], color=color)\n\n # show outliers (if any)\n idx = labels == -1\n if sum(idx):\n ax.scatter(X[idx, 0], X[idx, 1], c='k', marker='x')\n\n ax.grid(None)\n ax.set_xticks([])\n ax.set_yticks([])\n return ax\n\n\ndef cluster(dataset: str, n_clusters: int, clustering_algorithm: str):\n if isinstance(n_clusters, dict):\n n_clusters = n_clusters['value']\n else:\n n_clusters = int(n_clusters)\n\n X, labels = DATA_MAPPING[dataset](n_clusters)\n model = MODEL_MAPPING[clustering_algorithm](X, labels, n_clusters=n_clusters)\n if hasattr(model, \"labels_\"):\n y_pred = model.labels_.astype(int)\n else:\n y_pred = model.predict(X)\n\n fig, ax = plt.subplots(figsize=FIGSIZE)\n\n plot_clusters(ax, X, y_pred)\n ax.set_title(clustering_algorithm, fontsize=16)\n\n return fig\n\n\ntitle = \"Clustering with Scikit-learn\"\ndescription = (\n \"This example shows how different clustering algorithms work. Simply pick \"\n \"the dataset and the number of clusters to see how the clustering algorithms work. 
\"\n \"Colored circles are (predicted) labels and black x are outliers.\"\n)\n\n\ndef iter_grid(n_rows, n_cols):\n # create a grid using gradio Block\n for _ in range(n_rows):\n with gr.Row():\n for _ in range(n_cols):\n with gr.Column():\n yield\n\nwith gr.Blocks(title=title) as demo:\n gr.HTML(f\"{title}\")\n gr.Markdown(description)\n\n input_models = list(MODEL_MAPPING)\n input_data = gr.Radio(\n list(DATA_MAPPING),\n value=\"regular\",\n label=\"dataset\"\n )\n input_n_clusters = gr.Slider(\n minimum=1,\n maximum=MAX_CLUSTERS,\n value=4,\n step=1,\n label='Number of clusters'\n )\n n_rows = int(math.ceil(len(input_models) / N_COLS))\n counter = 0\n for _ in iter_grid(n_rows, N_COLS):\n if counter >= len(input_models):\n break\n\n input_model = input_models[counter]\n plot = gr.Plot(label=input_model)\n fn = partial(cluster, clustering_algorithm=input_model)\n input_data.change(fn=fn, inputs=[input_data, input_n_clusters], outputs=plot)\n input_n_clusters.change(fn=fn, inputs=[input_data, input_n_clusters], outputs=plot)\n counter += 1\n\ndemo.launch()\n", "text": "This demo built with Blocks generates 9 plots based on the input."}, {"name": "Time Series Forecasting", "dir": "timeseries-forecasting-with-prophet", "code": "import gradio as gr\nimport pypistats\nfrom datetime import date\nfrom dateutil.relativedelta import relativedelta\nimport pandas as pd\nfrom prophet import Prophet\npd.options.plotting.backend = \"plotly\"\n\ndef get_forecast(lib, time):\n\n data = pypistats.overall(lib, total=True, format=\"pandas\")\n data = data.groupby(\"category\").get_group(\"with_mirrors\").sort_values(\"date\")\n start_date = date.today() - relativedelta(months=int(time.split(\" \")[0]))\n df = data[(data['date'] > str(start_date))] \n\n df1 = df[['date','downloads']]\n df1.columns = ['ds','y']\n\n m = Prophet()\n m.fit(df1)\n future = m.make_future_dataframe(periods=90)\n forecast = m.predict(future)\n fig1 = m.plot(forecast)\n return fig1 \n\nwith gr.Blocks() as demo:\n gr.Markdown(\n \"\"\"\n **Pypi Download Stats \ud83d\udcc8 with Prophet Forecasting**: see live download stats for popular open-source libraries \ud83e\udd17 along with a 3 month forecast using Prophet. The [ source code for this Gradio demo is here](https://huggingface.co/spaces/gradio/timeseries-forecasting-with-prophet/blob/main/app.py).\n \"\"\")\n with gr.Row():\n lib = gr.Dropdown([\"pandas\", \"scikit-learn\", \"torch\", \"prophet\"], label=\"Library\", value=\"pandas\")\n time = gr.Dropdown([\"3 months\", \"6 months\", \"9 months\", \"12 months\"], label=\"Downloads over the last...\", value=\"12 months\")\n\n plt = gr.Plot()\n\n lib.change(get_forecast, [lib, time], plt, queue=False)\n time.change(get_forecast, [lib, time], plt, queue=False) \n demo.load(get_forecast, [lib, time], plt, queue=False) \n\ndemo.launch()", "text": "A simple dashboard showing pypi stats for python libraries. 
Updates on load, and has no buttons!"}, {"name": "Income Classification with XGBoost", "dir": "xgboost-income-prediction-with-explainability", "code": "import gradio as gr\nimport random\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport shap\nimport xgboost as xgb\nfrom datasets import load_dataset\n\n\ndataset = load_dataset(\"scikit-learn/adult-census-income\")\nX_train = dataset[\"train\"].to_pandas()\n_ = X_train.pop(\"fnlwgt\")\n_ = X_train.pop(\"race\")\ny_train = X_train.pop(\"income\")\ny_train = (y_train == \">50K\").astype(int)\ncategorical_columns = [\n \"workclass\",\n \"education\",\n \"marital.status\",\n \"occupation\",\n \"relationship\",\n \"sex\",\n \"native.country\",\n]\nX_train = X_train.astype({col: \"category\" for col in categorical_columns})\ndata = xgb.DMatrix(X_train, label=y_train, enable_categorical=True)\nmodel = xgb.train(params={\"objective\": \"binary:logistic\"}, dtrain=data)\nexplainer = shap.TreeExplainer(model)\n\ndef predict(*args):\n df = pd.DataFrame([args], columns=X_train.columns)\n df = df.astype({col: \"category\" for col in categorical_columns})\n pos_pred = model.predict(xgb.DMatrix(df, enable_categorical=True))\n return {\">50K\": float(pos_pred[0]), \"<=50K\": 1 - float(pos_pred[0])}\n\n\ndef interpret(*args):\n df = pd.DataFrame([args], columns=X_train.columns)\n df = df.astype({col: \"category\" for col in categorical_columns})\n shap_values = explainer.shap_values(xgb.DMatrix(df, enable_categorical=True))\n scores_desc = list(zip(shap_values[0], X_train.columns))\n scores_desc = sorted(scores_desc)\n fig_m = plt.figure(tight_layout=True)\n plt.barh([s[1] for s in scores_desc], [s[0] for s in scores_desc])\n plt.title(\"Feature Shap Values\")\n plt.ylabel(\"Shap Value\")\n plt.xlabel(\"Feature\")\n plt.tight_layout()\n return fig_m\n\n\nunique_class = sorted(X_train[\"workclass\"].unique())\nunique_education = sorted(X_train[\"education\"].unique())\nunique_marital_status = sorted(X_train[\"marital.status\"].unique())\nunique_relationship = sorted(X_train[\"relationship\"].unique())\nunique_occupation = sorted(X_train[\"occupation\"].unique())\nunique_sex = sorted(X_train[\"sex\"].unique())\nunique_country = sorted(X_train[\"native.country\"].unique())\n\nwith gr.Blocks() as demo:\n gr.Markdown(\"\"\"\n **Income Classification with XGBoost \ud83d\udcb0**: This demo uses an XGBoost classifier predicts income based on demographic factors, along with Shapley value-based *explanations*. 
The [source code for this Gradio demo is here](https://huggingface.co/spaces/gradio/xgboost-income-prediction-with-explainability/blob/main/app.py).\n \"\"\")\n with gr.Row():\n with gr.Column():\n age = gr.Slider(label=\"Age\", minimum=17, maximum=90, step=1, randomize=True)\n work_class = gr.Dropdown(\n label=\"Workclass\",\n choices=unique_class,\n value=lambda: random.choice(unique_class),\n )\n education = gr.Dropdown(\n label=\"Education Level\",\n choices=unique_education,\n value=lambda: random.choice(unique_education),\n )\n years = gr.Slider(\n label=\"Years of schooling\",\n minimum=1,\n maximum=16,\n step=1,\n randomize=True,\n )\n marital_status = gr.Dropdown(\n label=\"Marital Status\",\n choices=unique_marital_status,\n value=lambda: random.choice(unique_marital_status),\n )\n occupation = gr.Dropdown(\n label=\"Occupation\",\n choices=unique_occupation,\n value=lambda: random.choice(unique_occupation),\n )\n relationship = gr.Dropdown(\n label=\"Relationship Status\",\n choices=unique_relationship,\n value=lambda: random.choice(unique_relationship),\n )\n sex = gr.Dropdown(\n label=\"Sex\", choices=unique_sex, value=lambda: random.choice(unique_sex)\n )\n capital_gain = gr.Slider(\n label=\"Capital Gain\",\n minimum=0,\n maximum=100000,\n step=500,\n randomize=True,\n )\n capital_loss = gr.Slider(\n label=\"Capital Loss\", minimum=0, maximum=10000, step=500, randomize=True\n )\n hours_per_week = gr.Slider(\n label=\"Hours Per Week Worked\", minimum=1, maximum=99, step=1\n )\n country = gr.Dropdown(\n label=\"Native Country\",\n choices=unique_country,\n value=lambda: random.choice(unique_country),\n )\n with gr.Column():\n label = gr.Label()\n plot = gr.Plot()\n with gr.Row():\n predict_btn = gr.Button(value=\"Predict\")\n interpret_btn = gr.Button(value=\"Explain\")\n predict_btn.click(\n predict,\n inputs=[\n age,\n work_class,\n education,\n years,\n marital_status,\n occupation,\n relationship,\n sex,\n capital_gain,\n capital_loss,\n hours_per_week,\n country,\n ],\n outputs=[label],\n )\n interpret_btn.click(\n interpret,\n inputs=[\n age,\n work_class,\n education,\n years,\n marital_status,\n occupation,\n relationship,\n sex,\n capital_gain,\n capital_loss,\n hours_per_week,\n country,\n ],\n outputs=[plot],\n )\n\ndemo.launch()\n", "text": "This demo takes in 12 inputs from the user in dropdowns and sliders and predicts income. It also has a separate button for explaining the prediction."}, {"name": "Leaderboard", "dir": "leaderboard", "code": "import gradio as gr\nimport requests\nimport pandas as pd\nfrom huggingface_hub.hf_api import SpaceInfo\npath = f\"https://huggingface.co/api/spaces\"\n\n\ndef get_blocks_party_spaces():\n r = requests.get(path)\n d = r.json()\n spaces = [SpaceInfo(**x) for x in d]\n blocks_spaces = {}\n for i in range(0,len(spaces)):\n if spaces[i].id.split('/')[0] == 'Gradio-Blocks' and hasattr(spaces[i], 'likes') and spaces[i].id != 'Gradio-Blocks/Leaderboard' and spaces[i].id != 'Gradio-Blocks/README':\n blocks_spaces[spaces[i].id]=spaces[i].likes\n df = pd.DataFrame(\n [{\"Spaces_Name\": Spaces, \"likes\": likes} for Spaces,likes in blocks_spaces.items()])\n df = df.sort_values(by=['likes'],ascending=False)\n return df\n\nblock = gr.Blocks()\n\nwith block: \n gr.Markdown(\"\"\"Leaderboard for the most popular Blocks Event Spaces. 
To learn more and join, see Blocks Party Event\"\"\")\n with gr.Tabs():\n with gr.TabItem(\"Blocks Party Leaderboard\"):\n with gr.Row():\n data = gr.outputs.Dataframe(type=\"pandas\")\n with gr.Row():\n data_run = gr.Button(\"Refresh\")\n data_run.click(get_blocks_party_spaces, inputs=None, outputs=data)\n # running the function on page load in addition to when the button is clicked\n block.load(get_blocks_party_spaces, inputs=None, outputs=data) \n\nblock.launch()\n\n", "text": "A simple dashboard ranking spaces by number of likes."}, {"name": "Tax Calculator", "dir": "tax_calculator", "code": "import gradio as gr\n\ndef tax_calculator(income, marital_status, assets):\n tax_brackets = [(10, 0), (25, 8), (60, 12), (120, 20), (250, 30)]\n total_deductible = sum(assets[\"Cost\"])\n taxable_income = income - total_deductible\n\n total_tax = 0\n for bracket, rate in tax_brackets:\n if taxable_income > bracket:\n total_tax += (taxable_income - bracket) * rate / 100\n\n if marital_status == \"Married\":\n total_tax *= 0.75\n elif marital_status == \"Divorced\":\n total_tax *= 0.8\n\n return round(total_tax)\n\ndemo = gr.Interface(\n tax_calculator,\n [\n \"number\",\n gr.Radio([\"Single\", \"Married\", \"Divorced\"]),\n gr.Dataframe(\n headers=[\"Item\", \"Cost\"],\n datatype=[\"str\", \"number\"],\n label=\"Assets Purchased this Year\",\n ),\n ],\n \"number\",\n examples=[\n [10000, \"Married\", [[\"Suit\", 5000], [\"Laptop\", 800], [\"Car\", 1800]]],\n [80000, \"Single\", [[\"Suit\", 800], [\"Watch\", 1800], [\"Car\", 800]]],\n ],\n)\n\ndemo.launch()\n", "text": "Calculate taxes using Textbox, Radio, and Dataframe components"}]}, {"category": "\ud83c\udfa4 Audio & Speech", "demos": [{"name": "Text to Speech", "dir": "neon-tts-plugin-coqui", "code": "import tempfile\nimport gradio as gr\nfrom neon_tts_plugin_coqui import CoquiTTS\n\nLANGUAGES = list(CoquiTTS.langs.keys())\ncoquiTTS = CoquiTTS()\n\ndef tts(text: str, language: str):\n with tempfile.NamedTemporaryFile(suffix=\".wav\", delete=False) as fp:\n coquiTTS.get_tts(text, fp, speaker = {\"language\" : language})\n return fp.name\n\ninputs = [gr.Textbox(label=\"Input\", value=CoquiTTS.langs[\"en\"][\"sentence\"], max_lines=3), \n gr.Radio(label=\"Language\", choices=LANGUAGES, value=\"en\")]\noutputs = gr.Audio(label=\"Output\")\n\ndemo = gr.Interface(fn=tts, inputs=inputs, outputs=outputs)\n\ndemo.launch()", "text": "This demo converts text to speech in 14 languages."}, {"name": "Speech to Text (ASR)", "dir": "automatic-speech-recognition", "code": "import gradio as gr\nimport os\n\n# save your HF API token from https:/hf.co/settings/tokens as an env variable to avoid rate limiting\nauth_token = os.getenv(\"auth_token\")\n\n# automatically load the interface from a HF model \n# you can remove the api_key parameter if you don't care about rate limiting. \ndemo = gr.load(\n \"huggingface/facebook/wav2vec2-base-960h\",\n title=\"Speech-to-text\",\n inputs=\"mic\",\n description=\"Let me try to guess what you're saying!\",\n hf_token=auth_token\n)\n\ndemo.launch()\n", "text": "Automatic speech recognition English. 
Record from your microphone and the app will transcribe the audio."}, {"name": "Musical Instrument Identification", "dir": "musical_instrument_identification", "code": "import gradio as gr\nimport torch\nimport torchaudio\nfrom timeit import default_timer as timer\nfrom data_setups import audio_preprocess, resample\nimport gdown\n\nurl = 'https://drive.google.com/uc?id=1X5CR18u0I-ZOi_8P0cNptCe5JGk9Ro0C'\noutput = 'piano.wav'\ngdown.download(url, output, quiet=False)\nurl = 'https://drive.google.com/uc?id=1W-8HwmGR5SiyDbUcGAZYYDKdCIst07__'\noutput= 'torch_efficientnet_fold2_CNN.pth'\ngdown.download(url, output, quiet=False)\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nSAMPLE_RATE = 44100\nAUDIO_LEN = 2.90\nmodel = torch.load(\"torch_efficientnet_fold2_CNN.pth\", map_location=torch.device('cpu'))\nLABELS = [\n \"Cello\", \"Clarinet\", \"Flute\", \"Acoustic Guitar\", \"Electric Guitar\", \"Organ\", \"Piano\", \"Saxophone\", \"Trumpet\", \"Violin\", \"Voice\"\n]\nexample_list = [\n [\"piano.wav\"]\n]\n\n\ndef predict(audio_path):\n start_time = timer()\n wavform, sample_rate = torchaudio.load(audio_path)\n wav = resample(wavform, sample_rate, SAMPLE_RATE)\n if len(wav) > int(AUDIO_LEN * SAMPLE_RATE):\n wav = wav[:int(AUDIO_LEN * SAMPLE_RATE)]\n else:\n print(f\"input length {len(wav)} too small!, need over {int(AUDIO_LEN * SAMPLE_RATE)}\")\n return\n img = audio_preprocess(wav, SAMPLE_RATE).unsqueeze(0)\n model.eval()\n with torch.inference_mode():\n pred_probs = torch.softmax(model(img), dim=1)\n pred_labels_and_probs = {LABELS[i]: float(pred_probs[0][i]) for i in range(len(LABELS))}\n pred_time = round(timer() - start_time, 5)\n return pred_labels_and_probs, pred_time\n\ndemo = gr.Interface(fn=predict,\n inputs=gr.Audio(type=\"filepath\"),\n outputs=[gr.Label(num_top_classes=11, label=\"Predictions\"), \n gr.Number(label=\"Prediction time (s)\")],\n examples=example_list,\n cache_examples=False\n )\n\ndemo.launch(debug=False)\n", "text": "This demo identifies musical instruments from an audio file. It uses Gradio's Audio and Label components."}, {"name": "Speaker Verification", "dir": "same-person-or-different", "code": "import gradio as gr\nimport torch\nfrom torchaudio.sox_effects import apply_effects_file\nfrom transformers import AutoFeatureExtractor, AutoModelForAudioXVector\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nOUTPUT_OK = (\n \"\"\"\n
The speakers are\n{:.1f}%\nsimilar\nWelcome, human!\n(You must get at least 85% to be considered the same person)
\n\"\"\"\n)\nOUTPUT_FAIL = (\n \"\"\"\n
The speakers are\n{:.1f}%\nsimilar\nYou shall not pass!\n(You must get at least 85% to be considered the same person)
\n\"\"\"\n)\n\nEFFECTS = [\n [\"remix\", \"-\"],\n [\"channels\", \"1\"],\n [\"rate\", \"16000\"],\n [\"gain\", \"-1.0\"],\n [\"silence\", \"1\", \"0.1\", \"0.1%\", \"-1\", \"0.1\", \"0.1%\"],\n [\"trim\", \"0\", \"10\"],\n]\n\nTHRESHOLD = 0.85\n\nmodel_name = \"microsoft/unispeech-sat-base-plus-sv\"\nfeature_extractor = AutoFeatureExtractor.from_pretrained(model_name)\nmodel = AutoModelForAudioXVector.from_pretrained(model_name).to(device)\ncosine_sim = torch.nn.CosineSimilarity(dim=-1)\n\n\ndef similarity_fn(path1, path2):\n if not (path1 and path2):\n return 'ERROR: Please record audio for *both* speakers!'\n\n wav1, _ = apply_effects_file(path1, EFFECTS)\n wav2, _ = apply_effects_file(path2, EFFECTS)\n print(wav1.shape, wav2.shape)\n\n input1 = feature_extractor(wav1.squeeze(0), return_tensors=\"pt\", sampling_rate=16000).input_values.to(device)\n input2 = feature_extractor(wav2.squeeze(0), return_tensors=\"pt\", sampling_rate=16000).input_values.to(device)\n\n with torch.no_grad():\n emb1 = model(input1).embeddings\n emb2 = model(input2).embeddings\n emb1 = torch.nn.functional.normalize(emb1, dim=-1).cpu()\n emb2 = torch.nn.functional.normalize(emb2, dim=-1).cpu()\n similarity = cosine_sim(emb1, emb2).numpy()[0]\n\n if similarity >= THRESHOLD:\n output = OUTPUT_OK.format(similarity * 100)\n else:\n output = OUTPUT_FAIL.format(similarity * 100)\n\n return output\n\ninputs = [\n gr.inputs.Audio(source=\"microphone\", type=\"filepath\", optional=True, label=\"Speaker #1\"),\n gr.inputs.Audio(source=\"microphone\", type=\"filepath\", optional=True, label=\"Speaker #2\"),\n]\noutput = gr.outputs.HTML(label=\"\")\n\n\ndescription = (\n \"This demo from Microsoft will compare two speech samples and determine if they are from the same speaker. \"\n \"Try it with your own voice!\"\n)\narticle = (\n \"
\"\n \"\ud83c\udf99\ufe0f Learn more about UniSpeech-SAT | \"\n \"\ud83d\udcda UniSpeech-SAT paper | \"\n \"\ud83d\udcda X-Vector paper\"\n \"
\"\n)\nexamples = [\n [\"samples/cate_blanch.mp3\", \"samples/cate_blanch_2.mp3\"],\n [\"samples/cate_blanch.mp3\", \"samples/heath_ledger.mp3\"],\n]\n\ninterface = gr.Interface(\n fn=similarity_fn,\n inputs=inputs,\n outputs=output,\n layout=\"horizontal\",\n allow_flagging=False,\n live=False,\n examples=examples,\n cache_examples=False\n)\ninterface.launch()\n", "text": "This demo identifies if two speakers are the same person using Gradio's Audio and HTML components."}]}] \ No newline at end of file diff --git a/js/_website/src/routes/docs/docs.json b/js/_website/src/routes/docs/docs.json index 04e9cda481623..34a9fc6ffb60e 100644 --- a/js/_website/src/routes/docs/docs.json +++ b/js/_website/src/routes/docs/docs.json @@ -1,19453 +1 @@ -{ - "docs": { - "building": { - "simplecsvlogger": { - "class": null, - "name": "SimpleCSVLogger", - "description": "A simplified implementation of the FlaggingCallback abstract class provided for illustrative purposes. Each flagged sample (both the input and output data) is logged to a CSV file on the machine running the gradio app.", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - } - ], - "returns": { "annotation": null }, - "example": "import gradio as gr\ndef image_classifier(inp):\n return {'cat': 0.3, 'dog': 0.7}\ndemo = gr.Interface(fn=image_classifier, inputs=\"image\", outputs=\"label\",\n flagging_callback=SimpleCSVLogger())", - "fns": [], - "parent": "gradio" - }, - "csvlogger": { - "class": null, - "name": "CSVLogger", - "description": "The default implementation of the FlaggingCallback abstract class. Each flagged sample (both the input and output data) is logged to a CSV file with headers on the machine running the gradio app.", - "tags": { "guides": "using-flagging" }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - } - ], - "returns": { "annotation": null }, - "example": "import gradio as gr\ndef image_classifier(inp):\n return {'cat': 0.3, 'dog': 0.7}\ndemo = gr.Interface(fn=image_classifier, inputs=\"image\", outputs=\"label\",\n flagging_callback=CSVLogger())", - "fns": [], - "guides": [ - { - "name": "using-flagging", - "category": "other-tutorials", - "pretty_category": "Other Tutorials", - "guide_index": null, - "absolute_index": 45, - "pretty_name": "Using Flagging", - "content": "# Using Flagging\n\n\n\n\n## Introduction\n\nWhen you demo a machine learning model, you might want to collect data from users who try the model, particularly data points in which the model is not behaving as expected. Capturing these \"hard\" data points is valuable because it allows you to improve your machine learning model and make it more reliable and robust.\n\nGradio simplifies the collection of this data by including a **Flag** button with every `Interface`. This allows a user or tester to easily send data back to the machine where the demo is running. In this Guide, we discuss more about how to use the flagging feature, both with `gradio.Interface` as well as with `gradio.Blocks`.\n\n## The **Flag** button in `gradio.Interface`\n\nFlagging with Gradio's `Interface` is especially easy. By default, underneath the output components, there is a button marked **Flag**. When a user testing your model sees input with interesting output, they can click the flag button to send the input and output data back to the machine where the demo is running. The sample is saved to a CSV log file (by default). 
If the demo involves images, audio, video, or other types of files, these are saved separately in a parallel directory and the paths to these files are saved in the CSV file.\n\nThere are [four parameters](https://gradio.app/docs/#interface-header) in `gradio.Interface` that control how flagging works. We will go over them in greater detail.\n\n* `allow_flagging`: this parameter can be set to either `\"manual\"` (default), `\"auto\"`, or `\"never\"`. \n * `manual`: users will see a button to flag, and samples are only flagged when the button is clicked.\n * `auto`: users will not see a button to flag, but every sample will be flagged automatically. \n * `never`: users will not see a button to flag, and no sample will be flagged. \n* `flagging_options`: this parameter can be either `None` (default) or a list of strings.\n * If `None`, then the user simply clicks on the **Flag** button and no additional options are shown.\n * If a list of strings are provided, then the user sees several buttons, corresponding to each of the strings that are provided. For example, if the value of this parameter is `[\"Incorrect\", \"Ambiguous\"]`, then buttons labeled **Flag as Incorrect** and **Flag as Ambiguous** appear. This only applies if `allow_flagging` is `\"manual\"`.\n * The chosen option is then logged along with the input and output.\n* `flagging_dir`: this parameter takes a string.\n * It represents what to name the directory where flagged data is stored.\n* `flagging_callback`: this parameter takes an instance of a subclass of the `FlaggingCallback` class\n * Using this parameter allows you to write custom code that gets run when the flag button is clicked\n * By default, this is set to an instance of `gr.CSVLogger`\n * One example is setting it to an instance of `gr.HuggingFaceDatasetSaver` which can allow you to pipe any flagged data into a HuggingFace Dataset. (See more below.)\n\n## What happens to flagged data?\n\nWithin the directory provided by the `flagging_dir` argument, a CSV file will log the flagged data. \n\nHere's an example: The code below creates the calculator interface embedded below it:\n\n```python\nimport gradio as gr\n\n\ndef calculator(num1, operation, num2):\n if operation == \"add\":\n return num1 + num2\n elif operation == \"subtract\":\n return num1 - num2\n elif operation == \"multiply\":\n return num1 * num2\n elif operation == \"divide\":\n return num1 / num2\n\n\niface = gr.Interface(\n calculator,\n [\"number\", gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]), \"number\"],\n \"number\",\n allow_flagging=\"manual\"\n)\n\niface.launch()\n```\n\n\n\nWhen you click the flag button above, the directory where the interface was launched will include a new flagged subfolder, with a csv file inside it. This csv file includes all the data that was flagged. \n\n```directory\n+-- flagged/\n| +-- logs.csv\n```\n_flagged/logs.csv_\n```csv\nnum1,operation,num2,Output,timestamp\n5,add,7,12,2022-01-31 11:40:51.093412\n6,subtract,1.5,4.5,2022-01-31 03:25:32.023542\n```\n\nIf the interface involves file data, such as for Image and Audio components, folders will be created to store those flagged data as well. 
For example an `image` input to `image` output interface will create the following structure.\n\n```directory\n+-- flagged/\n| +-- logs.csv\n| +-- image/\n| | +-- 0.png\n| | +-- 1.png\n| +-- Output/\n| | +-- 0.png\n| | +-- 1.png\n```\n_flagged/logs.csv_\n```csv\nim,Output timestamp\nim/0.png,Output/0.png,2022-02-04 19:49:58.026963\nim/1.png,Output/1.png,2022-02-02 10:40:51.093412\n```\n\nIf you wish for the user to provide a reason for flagging, you can pass a list of strings to the `flagging_options` argument of Interface. Users will have to select one of these choices when flagging, and the option will be saved as an additional column to the CSV.\n\nIf we go back to the calculator example, the following code will create the interface embedded below it. \n```python\niface = gr.Interface(\n calculator,\n [\"number\", gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]), \"number\"],\n \"number\",\n allow_flagging=\"manual\",\n flagging_options=[\"wrong sign\", \"off by one\", \"other\"]\n)\n\niface.launch()\n```\n\n\nWhen users click the flag button, the csv file will now include a column indicating the selected option.\n\n_flagged/logs.csv_\n```csv\nnum1,operation,num2,Output,flag,timestamp\n5,add,7,-12,wrong sign,2022-02-04 11:40:51.093412\n6,subtract,1.5,3.5,off by one,2022-02-04 11:42:32.062512\n```\n\n## The HuggingFaceDatasetSaver Callback\n\nSometimes, saving the data to a local CSV file doesn't make sense. For example, on Hugging Face\nSpaces, developers typically don't have access to the underlying ephemeral machine hosting the Gradio\ndemo. That's why, by default, flagging is turned off in Hugging Face Space. However,\nyou may want to do something else with the flagged data.\n\nWe've made this super easy with the `flagging_callback` parameter.\n\nFor example, below we're going to pipe flagged data from our calculator example into a Hugging Face Dataset, e.g. so that we can build a \"crowd-sourced\" dataset:\n\n\n```python\nimport os\n\nHF_TOKEN = os.getenv('HF_TOKEN')\nhf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, \"crowdsourced-calculator-demo\")\n\niface = gr.Interface(\n calculator,\n [\"number\", gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]), \"number\"],\n \"number\",\n description=\"Check out the crowd-sourced dataset at: [https://huggingface.co/datasets/aliabd/crowdsourced-calculator-demo](https://huggingface.co/datasets/aliabd/crowdsourced-calculator-demo)\",\n allow_flagging=\"manual\",\n flagging_options=[\"wrong sign\", \"off by one\", \"other\"],\n flagging_callback=hf_writer\n)\n\niface.launch()\n```\n\nNotice that we define our own \ninstance of `gradio.HuggingFaceDatasetSaver` using our Hugging Face token and\nthe name of a dataset we'd like to save samples to. In addition, we also set `allow_flagging=\"manual\"`\nbecause on Hugging Face Spaces, `allow_flagging` is set to `\"never\"` by default. Here's our demo:\n\n\n\nYou can now see all the examples flagged above in this [public Hugging Face dataset](https://huggingface.co/datasets/aliabd/crowdsourced-calculator-demo).\n\n![flagging callback hf](https://github.com/gradio-app/gradio/blob/main/guides/assets/flagging-callback-hf.png?raw=true)\n\nWe created the `gradio.HuggingFaceDatasetSaver` class, but you can pass your own custom class as long as it inherits from `FLaggingCallback` defined in [this file](https://github.com/gradio-app/gradio/blob/master/gradio/flagging.py). If you create a cool callback, contribute it to the repo! 
\n\n## Flagging with Blocks\n\nWhat about if you are using `gradio.Blocks`? On one hand, you have even more flexibility\nwith Blocks -- you can write whatever Python code you want to run when a button is clicked,\nand assign that using the built-in events in Blocks.\n\nAt the same time, you might want to use an existing `FlaggingCallback` to avoid writing extra code.\nThis requires two steps:\n\n1. You have to run your callback's `.setup()` somewhere in the code prior to the \nfirst time you flag data\n2. When the flagging button is clicked, then you trigger the callback's `.flag()` method,\nmaking sure to collect the arguments correctly and disabling the typical preprocessing. \n\nHere is an example with an image sepia filter Blocks demo that lets you flag\ndata using the default `CSVLogger`:\n\n```python\nimport numpy as np\nimport gradio as gr\n\ndef sepia(input_img, strength):\n sepia_filter = strength * np.array(\n [[0.393, 0.769, 0.189], [0.349, 0.686, 0.168], [0.272, 0.534, 0.131]]\n ) + (1-strength) * np.identity(3)\n sepia_img = input_img.dot(sepia_filter.T)\n sepia_img /= sepia_img.max()\n return sepia_img\n\ncallback = gr.CSVLogger()\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n img_input = gr.Image()\n strength = gr.Slider(0, 1, 0.5)\n img_output = gr.Image()\n with gr.Row():\n btn = gr.Button(\"Flag\")\n \n # This needs to be called at some point prior to the first call to callback.flag()\n callback.setup([img_input, strength, img_output], \"flagged_data_points\")\n\n img_input.change(sepia, [img_input, strength], img_output)\n strength.change(sepia, [img_input, strength], img_output)\n \n # We can choose which components to flag -- in this case, we'll flag all of them\n btn.click(lambda *args: callback.flag(args), [img_input, strength, img_output], None, preprocess=False)\n\ndemo.launch()\n\n```\n\n\n## Privacy\n\nImportant Note: please make sure your users understand when the data they submit is being saved, and what you plan on doing with it. This is especially important when you use `allow_flagging=auto` (when all of the data submitted through the demo is being flagged)\n\n### That's all! Happy building :) \n", - "html": "

\n", - "tags": ["FLAGGING", "DATA"], - "spaces": [ - "https://huggingface.co/spaces/gradio/calculator-flagging-crowdsourced", - "https://huggingface.co/spaces/gradio/calculator-flagging-options", - "https://huggingface.co/spaces/gradio/calculator-flag-basic" - ], - "url": "/guides/using-flagging/", - "contributor": null - } - ], - "parent": "gradio" - }, - "huggingfacedatasetsaver": { - "class": null, - "name": "HuggingFaceDatasetSaver", - "description": "A callback that saves each flagged sample (both the input and output data) to a HuggingFace dataset.
", - "tags": { "guides": "using-flagging" }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "hf_token", - "annotation": "str", - "doc": "The HuggingFace token to use to create (and write the flagged sample to) the HuggingFace dataset (defaults to the registered one)." - }, - { - "name": "dataset_name", - "annotation": "str", - "doc": "The repo_id of the dataset to save the data to, e.g. \"image-classifier-1\" or \"username/image-classifier-1\"." - }, - { - "name": "organization", - "annotation": "str | None", - "doc": "Deprecated argument. Please pass a full dataset id (e.g. 'username/dataset_name') to `dataset_name` instead.", - "default": "None" - }, - { - "name": "private", - "annotation": "bool", - "doc": "Whether the dataset should be private (defaults to False).", - "default": "False" - }, - { - "name": "info_filename", - "annotation": "str", - "doc": "The name of the file to save the dataset info (defaults to \"dataset_infos.json\").", - "default": "\"dataset_info.json\"" - }, - { - "name": "separate_dirs", - "annotation": "bool", - "doc": "If True, each flagged item will be saved in a separate directory. This makes the flagging more robust to concurrent editing, but may be less convenient to use.", - "default": "False" - }, - { - "name": "verbose", - "annotation": "bool", - "doc": null, - "default": "True" - } - ], - "returns": { "annotation": null }, - "example": "import gradio as gr\nhf_writer = gr.HuggingFaceDatasetSaver(HF_API_TOKEN, \"image-classification-mistakes\")\ndef image_classifier(inp):\n return {'cat': 0.3, 'dog': 0.7}\ndemo = gr.Interface(fn=image_classifier, inputs=\"image\", outputs=\"label\",\n allow_flagging=\"manual\", flagging_callback=hf_writer)", - "fns": [], - "guides": [ - { - "name": "using-flagging", - "category": "other-tutorials", - "pretty_category": "Other Tutorials", - "guide_index": null, - "absolute_index": 45, - "pretty_name": "Using Flagging", - "content": "# Using Flagging\n\n\n\n\n## Introduction\n\nWhen you demo a machine learning model, you might want to collect data from users who try the model, particularly data points in which the model is not behaving as expected. Capturing these \"hard\" data points is valuable because it allows you to improve your machine learning model and make it more reliable and robust.\n\nGradio simplifies the collection of this data by including a **Flag** button with every `Interface`. This allows a user or tester to easily send data back to the machine where the demo is running. In this Guide, we discuss more about how to use the flagging feature, both with `gradio.Interface` as well as with `gradio.Blocks`.\n\n## The **Flag** button in `gradio.Interface`\n\nFlagging with Gradio's `Interface` is especially easy. By default, underneath the output components, there is a button marked **Flag**. When a user testing your model sees input with interesting output, they can click the flag button to send the input and output data back to the machine where the demo is running. The sample is saved to a CSV log file (by default). If the demo involves images, audio, video, or other types of files, these are saved separately in a parallel directory and the paths to these files are saved in the CSV file.\n\nThere are [four parameters](https://gradio.app/docs/#interface-header) in `gradio.Interface` that control how flagging works. We will go over them in greater detail.\n\n* `allow_flagging`: this parameter can be set to either `\"manual\"` (default), `\"auto\"`, or `\"never\"`. 
\n * `manual`: users will see a button to flag, and samples are only flagged when the button is clicked.\n * `auto`: users will not see a button to flag, but every sample will be flagged automatically. \n * `never`: users will not see a button to flag, and no sample will be flagged. \n* `flagging_options`: this parameter can be either `None` (default) or a list of strings.\n * If `None`, then the user simply clicks on the **Flag** button and no additional options are shown.\n * If a list of strings are provided, then the user sees several buttons, corresponding to each of the strings that are provided. For example, if the value of this parameter is `[\"Incorrect\", \"Ambiguous\"]`, then buttons labeled **Flag as Incorrect** and **Flag as Ambiguous** appear. This only applies if `allow_flagging` is `\"manual\"`.\n * The chosen option is then logged along with the input and output.\n* `flagging_dir`: this parameter takes a string.\n * It represents what to name the directory where flagged data is stored.\n* `flagging_callback`: this parameter takes an instance of a subclass of the `FlaggingCallback` class\n * Using this parameter allows you to write custom code that gets run when the flag button is clicked\n * By default, this is set to an instance of `gr.CSVLogger`\n * One example is setting it to an instance of `gr.HuggingFaceDatasetSaver` which can allow you to pipe any flagged data into a HuggingFace Dataset. (See more below.)\n\n## What happens to flagged data?\n\nWithin the directory provided by the `flagging_dir` argument, a CSV file will log the flagged data. \n\nHere's an example: The code below creates the calculator interface embedded below it:\n\n```python\nimport gradio as gr\n\n\ndef calculator(num1, operation, num2):\n if operation == \"add\":\n return num1 + num2\n elif operation == \"subtract\":\n return num1 - num2\n elif operation == \"multiply\":\n return num1 * num2\n elif operation == \"divide\":\n return num1 / num2\n\n\niface = gr.Interface(\n calculator,\n [\"number\", gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]), \"number\"],\n \"number\",\n allow_flagging=\"manual\"\n)\n\niface.launch()\n```\n\n\n\nWhen you click the flag button above, the directory where the interface was launched will include a new flagged subfolder, with a csv file inside it. This csv file includes all the data that was flagged. \n\n```directory\n+-- flagged/\n| +-- logs.csv\n```\n_flagged/logs.csv_\n```csv\nnum1,operation,num2,Output,timestamp\n5,add,7,12,2022-01-31 11:40:51.093412\n6,subtract,1.5,4.5,2022-01-31 03:25:32.023542\n```\n\nIf the interface involves file data, such as for Image and Audio components, folders will be created to store those flagged data as well. For example an `image` input to `image` output interface will create the following structure.\n\n```directory\n+-- flagged/\n| +-- logs.csv\n| +-- image/\n| | +-- 0.png\n| | +-- 1.png\n| +-- Output/\n| | +-- 0.png\n| | +-- 1.png\n```\n_flagged/logs.csv_\n```csv\nim,Output timestamp\nim/0.png,Output/0.png,2022-02-04 19:49:58.026963\nim/1.png,Output/1.png,2022-02-02 10:40:51.093412\n```\n\nIf you wish for the user to provide a reason for flagging, you can pass a list of strings to the `flagging_options` argument of Interface. Users will have to select one of these choices when flagging, and the option will be saved as an additional column to the CSV.\n\nIf we go back to the calculator example, the following code will create the interface embedded below it. 
\n```python\niface = gr.Interface(\n calculator,\n [\"number\", gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]), \"number\"],\n \"number\",\n allow_flagging=\"manual\",\n flagging_options=[\"wrong sign\", \"off by one\", \"other\"]\n)\n\niface.launch()\n```\n\n\nWhen users click the flag button, the csv file will now include a column indicating the selected option.\n\n_flagged/logs.csv_\n```csv\nnum1,operation,num2,Output,flag,timestamp\n5,add,7,-12,wrong sign,2022-02-04 11:40:51.093412\n6,subtract,1.5,3.5,off by one,2022-02-04 11:42:32.062512\n```\n\n## The HuggingFaceDatasetSaver Callback\n\nSometimes, saving the data to a local CSV file doesn't make sense. For example, on Hugging Face\nSpaces, developers typically don't have access to the underlying ephemeral machine hosting the Gradio\ndemo. That's why, by default, flagging is turned off in Hugging Face Space. However,\nyou may want to do something else with the flagged data.\n\nWe've made this super easy with the `flagging_callback` parameter.\n\nFor example, below we're going to pipe flagged data from our calculator example into a Hugging Face Dataset, e.g. so that we can build a \"crowd-sourced\" dataset:\n\n\n```python\nimport os\n\nHF_TOKEN = os.getenv('HF_TOKEN')\nhf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, \"crowdsourced-calculator-demo\")\n\niface = gr.Interface(\n calculator,\n [\"number\", gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]), \"number\"],\n \"number\",\n description=\"Check out the crowd-sourced dataset at: [https://huggingface.co/datasets/aliabd/crowdsourced-calculator-demo](https://huggingface.co/datasets/aliabd/crowdsourced-calculator-demo)\",\n allow_flagging=\"manual\",\n flagging_options=[\"wrong sign\", \"off by one\", \"other\"],\n flagging_callback=hf_writer\n)\n\niface.launch()\n```\n\nNotice that we define our own \ninstance of `gradio.HuggingFaceDatasetSaver` using our Hugging Face token and\nthe name of a dataset we'd like to save samples to. In addition, we also set `allow_flagging=\"manual\"`\nbecause on Hugging Face Spaces, `allow_flagging` is set to `\"never\"` by default. Here's our demo:\n\n\n\nYou can now see all the examples flagged above in this [public Hugging Face dataset](https://huggingface.co/datasets/aliabd/crowdsourced-calculator-demo).\n\n![flagging callback hf](https://github.com/gradio-app/gradio/blob/main/guides/assets/flagging-callback-hf.png?raw=true)\n\nWe created the `gradio.HuggingFaceDatasetSaver` class, but you can pass your own custom class as long as it inherits from `FLaggingCallback` defined in [this file](https://github.com/gradio-app/gradio/blob/master/gradio/flagging.py). If you create a cool callback, contribute it to the repo! \n\n## Flagging with Blocks\n\nWhat about if you are using `gradio.Blocks`? On one hand, you have even more flexibility\nwith Blocks -- you can write whatever Python code you want to run when a button is clicked,\nand assign that using the built-in events in Blocks.\n\nAt the same time, you might want to use an existing `FlaggingCallback` to avoid writing extra code.\nThis requires two steps:\n\n1. You have to run your callback's `.setup()` somewhere in the code prior to the \nfirst time you flag data\n2. When the flagging button is clicked, then you trigger the callback's `.flag()` method,\nmaking sure to collect the arguments correctly and disabling the typical preprocessing. 
\n\nHere is an example with an image sepia filter Blocks demo that lets you flag\ndata using the default `CSVLogger`:\n\n```python\nimport numpy as np\nimport gradio as gr\n\ndef sepia(input_img, strength):\n sepia_filter = strength * np.array(\n [[0.393, 0.769, 0.189], [0.349, 0.686, 0.168], [0.272, 0.534, 0.131]]\n ) + (1-strength) * np.identity(3)\n sepia_img = input_img.dot(sepia_filter.T)\n sepia_img /= sepia_img.max()\n return sepia_img\n\ncallback = gr.CSVLogger()\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n img_input = gr.Image()\n strength = gr.Slider(0, 1, 0.5)\n img_output = gr.Image()\n with gr.Row():\n btn = gr.Button(\"Flag\")\n \n # This needs to be called at some point prior to the first call to callback.flag()\n callback.setup([img_input, strength, img_output], \"flagged_data_points\")\n\n img_input.change(sepia, [img_input, strength], img_output)\n strength.change(sepia, [img_input, strength], img_output)\n \n # We can choose which components to flag -- in this case, we'll flag all of them\n btn.click(lambda *args: callback.flag(args), [img_input, strength, img_output], None, preprocess=False)\n\ndemo.launch()\n\n```\n\n\n## Privacy\n\nImportant Note: please make sure your users understand when the data they submit is being saved, and what you plan on doing with it. This is especially important when you use `allow_flagging=auto` (when all of the data submitted through the demo is being flagged)\n\n### That's all! Happy building :) \n", - "html": "

Using Flagging

\n\n

Introduction

\n\n

When you demo a machine learning model, you might want to collect data from users who try the model, particularly data points in which the model is not behaving as expected. Capturing these \"hard\" data points is valuable because it allows you to improve your machine learning model and make it more reliable and robust.

\n\n

Gradio simplifies the collection of this data by including a Flag button with every Interface. This allows a user or tester to easily send data back to the machine where the demo is running. In this Guide, we discuss more about how to use the flagging feature, both with gradio.Interface as well as with gradio.Blocks.

\n\n

The Flag button in gradio.Interface

\n\n

Flagging with Gradio's Interface is especially easy. By default, underneath the output components, there is a button marked Flag. When a user testing your model sees input with interesting output, they can click the flag button to send the input and output data back to the machine where the demo is running. The sample is saved to a CSV log file (by default). If the demo involves images, audio, video, or other types of files, these are saved separately in a parallel directory and the paths to these files are saved in the CSV file.

\n\n

There are four parameters in gradio.Interface that control how flagging works. We will go over them in greater detail.

\n\n
    \n
  • allow_flagging: this parameter can be set to either \"manual\" (default), \"auto\", or \"never\".
    \n
      \n
    • manual: users will see a button to flag, and samples are only flagged when the button is clicked.
    • \n
    • auto: users will not see a button to flag, but every sample will be flagged automatically.
    • \n
    • never: users will not see a button to flag, and no sample will be flagged.
    • \n
  • \n
  • flagging_options: this parameter can be either None (default) or a list of strings.\n
      \n
    • If None, then the user simply clicks on the Flag button and no additional options are shown.
    • \n
    • If a list of strings are provided, then the user sees several buttons, corresponding to each of the strings that are provided. For example, if the value of this parameter is [\"Incorrect\", \"Ambiguous\"], then buttons labeled Flag as Incorrect and Flag as Ambiguous appear. This only applies if allow_flagging is \"manual\".
    • \n
    • The chosen option is then logged along with the input and output.
    • \n
  • \n
  • flagging_dir: this parameter takes a string.\n
      \n
    • It represents what to name the directory where flagged data is stored.
    • \n
  • \n
  • flagging_callback: this parameter takes an instance of a subclass of the FlaggingCallback class\n
      \n
    • Using this parameter allows you to write custom code that gets run when the flag button is clicked
    • \n
    • By default, this is set to an instance of gr.CSVLogger
    • \n
    • One example is setting it to an instance of gr.HuggingFaceDatasetSaver which can allow you to pipe any flagged data into a HuggingFace Dataset. (See more below.)
    • \n
  • \n
\n\n

What happens to flagged data?

\n\n

Within the directory provided by the flagging_dir argument, a CSV file will log the flagged data.

\n\n

Here's an example: The code below creates the calculator interface embedded below it:

\n\n
import gradio as gr\n\n\ndef calculator(num1, operation, num2):\n    if operation == \"add\":\n        return num1 + num2\n    elif operation == \"subtract\":\n        return num1 - num2\n    elif operation == \"multiply\":\n        return num1 * num2\n    elif operation == \"divide\":\n        return num1 / num2\n\n\niface = gr.Interface(\n    calculator,\n    [\"number\", gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]), \"number\"],\n    \"number\",\n    allow_flagging=\"manual\"\n)\n\niface.launch()\n
\n\n

\n\n

When you click the flag button above, the directory where the interface was launched will include a new flagged subfolder, with a csv file inside it. This csv file includes all the data that was flagged.

\n\n
+-- flagged/\n|   +-- logs.csv\n
\n\n

flagged/logs.csv

\n\n
num1,operation,num2,Output,timestamp\n5,add,7,12,2022-01-31 11:40:51.093412\n6,subtract,1.5,4.5,2022-01-31 03:25:32.023542\n
\n\n

If the interface involves file data, such as for Image and Audio components, folders will be created to store those flagged data as well. For example an image input to image output interface will create the following structure.

\n\n
+-- flagged/\n|   +-- logs.csv\n|   +-- image/\n|   |   +-- 0.png\n|   |   +-- 1.png\n|   +-- Output/\n|   |   +-- 0.png\n|   |   +-- 1.png\n
\n\n

flagged/logs.csv

\n\n
im,Output timestamp\nim/0.png,Output/0.png,2022-02-04 19:49:58.026963\nim/1.png,Output/1.png,2022-02-02 10:40:51.093412\n
\n\n

If you wish for the user to provide a reason for flagging, you can pass a list of strings to the flagging_options argument of Interface. Users will have to select one of these choices when flagging, and the option will be saved as an additional column to the CSV.

\n\n

If we go back to the calculator example, the following code will create the interface embedded below it.

\n\n
iface = gr.Interface(\n    calculator,\n    [\"number\", gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]), \"number\"],\n    \"number\",\n    allow_flagging=\"manual\",\n    flagging_options=[\"wrong sign\", \"off by one\", \"other\"]\n)\n\niface.launch()\n
\n\n

\n\n

When users click the flag button, the csv file will now include a column indicating the selected option.

\n\n

flagged/logs.csv

\n\n
num1,operation,num2,Output,flag,timestamp\n5,add,7,-12,wrong sign,2022-02-04 11:40:51.093412\n6,subtract,1.5,3.5,off by one,2022-02-04 11:42:32.062512\n
\n\n

The HuggingFaceDatasetSaver Callback

\n\n

Sometimes, saving the data to a local CSV file doesn't make sense. For example, on Hugging Face\nSpaces, developers typically don't have access to the underlying ephemeral machine hosting the Gradio\ndemo. That's why, by default, flagging is turned off in Hugging Face Space. However,\nyou may want to do something else with the flagged data.

\n\n

We've made this super easy with the flagging_callback parameter.

\n\n

For example, below we're going to pipe flagged data from our calculator example into a Hugging Face Dataset, e.g. so that we can build a \"crowd-sourced\" dataset:

\n\n
import os\n\nHF_TOKEN = os.getenv('HF_TOKEN')\nhf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, \"crowdsourced-calculator-demo\")\n\niface = gr.Interface(\n    calculator,\n    [\"number\", gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]), \"number\"],\n    \"number\",\n    description=\"Check out the crowd-sourced dataset at: [https://huggingface.co/datasets/aliabd/crowdsourced-calculator-demo](https://huggingface.co/datasets/aliabd/crowdsourced-calculator-demo)\",\n    allow_flagging=\"manual\",\n    flagging_options=[\"wrong sign\", \"off by one\", \"other\"],\n    flagging_callback=hf_writer\n)\n\niface.launch()\n
\n\n

Notice that we define our own \ninstance of gradio.HuggingFaceDatasetSaver using our Hugging Face token and\nthe name of a dataset we'd like to save samples to. In addition, we also set allow_flagging=\"manual\"\nbecause on Hugging Face Spaces, allow_flagging is set to \"never\" by default. Here's our demo:

\n\n

\n\n

You can now see all the examples flagged above in this public Hugging Face dataset.

\n\n

\"flagging

\n\n

We created the gradio.HuggingFaceDatasetSaver class, but you can pass your own custom class as long as it inherits from FLaggingCallback defined in this file. If you create a cool callback, contribute it to the repo!

\n\n

Flagging with Blocks

\n\n

What about if you are using gradio.Blocks? On one hand, you have even more flexibility\nwith Blocks -- you can write whatever Python code you want to run when a button is clicked,\nand assign that using the built-in events in Blocks.

\n\n

At the same time, you might want to use an existing FlaggingCallback to avoid writing extra code.\nThis requires two steps:

\n\n
    \n
  1. You have to run your callback's .setup() somewhere in the code prior to the \nfirst time you flag data
  2. \n
  3. When the flagging button is clicked, then you trigger the callback's .flag() method,\nmaking sure to collect the arguments correctly and disabling the typical preprocessing.
  4. \n
\n\n

Here is an example with an image sepia filter Blocks demo that lets you flag\ndata using the default CSVLogger:

\n\n
import numpy as np\nimport gradio as gr\n\ndef sepia(input_img, strength):\n    sepia_filter = strength * np.array(\n        [[0.393, 0.769, 0.189], [0.349, 0.686, 0.168], [0.272, 0.534, 0.131]]\n    ) + (1-strength) * np.identity(3)\n    sepia_img = input_img.dot(sepia_filter.T)\n    sepia_img /= sepia_img.max()\n    return sepia_img\n\ncallback = gr.CSVLogger()\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        with gr.Column():\n            img_input = gr.Image()\n            strength = gr.Slider(0, 1, 0.5)\n        img_output = gr.Image()\n    with gr.Row():\n        btn = gr.Button(\"Flag\")\n\n    # This needs to be called at some point prior to the first call to callback.flag()\n    callback.setup([img_input, strength, img_output], \"flagged_data_points\")\n\n    img_input.change(sepia, [img_input, strength], img_output)\n    strength.change(sepia, [img_input, strength], img_output)\n\n    # We can choose which components to flag -- in this case, we'll flag all of them\n    btn.click(lambda *args: callback.flag(args), [img_input, strength, img_output], None, preprocess=False)\n\ndemo.launch()\n\n
\n\n

\n\n

Privacy

\n\n

Important Note: please make sure your users understand when the data they submit is being saved, and what you plan on doing with it. This is especially important when you use allow_flagging=\"auto\" (when all of the data submitted through the demo is being flagged).

\n\n
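
For reference, automatic flagging is just a matter of setting allow_flagging=\"auto\" on the Interface; here it is on the calculator from earlier, where every submission gets logged:

\n\n
iface = gr.Interface(\n    calculator,\n    [\"number\", gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]), \"number\"],\n    \"number\",\n    allow_flagging=\"auto\",  # every submission is saved, so make sure users know\n)\n\niface.launch()\n
\n\n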

That's all! Happy building :)

\n", - "tags": ["FLAGGING", "DATA"], - "spaces": [ - "https://huggingface.co/spaces/gradio/calculator-flagging-crowdsourced", - "https://huggingface.co/spaces/gradio/calculator-flagging-options", - "https://huggingface.co/spaces/gradio/calculator-flag-basic" - ], - "url": "/guides/using-flagging/", - "contributor": null - } - ], - "parent": "gradio" - }, - "base": { - "class": null, - "name": "Base", - "description": "", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "primary_hue", - "annotation": "colors.Color | str", - "doc": "The primary hue of the theme. Load a preset, like gradio.themes.colors.green (or just the string \"green\"), or pass your own gradio.themes.utils.Color object.", - "default": "Color()" - }, - { - "name": "secondary_hue", - "annotation": "colors.Color | str", - "doc": "The secondary hue of the theme. Load a preset, like gradio.themes.colors.green (or just the string \"green\"), or pass your own gradio.themes.utils.Color object.", - "default": "Color()" - }, - { - "name": "neutral_hue", - "annotation": "colors.Color | str", - "doc": "The neutral hue of the theme, used . Load a preset, like gradio.themes.colors.green (or just the string \"green\"), or pass your own gradio.themes.utils.Color object.", - "default": "Color()" - }, - { - "name": "text_size", - "annotation": "sizes.Size | str", - "doc": "The size of the text. Load a preset, like gradio.themes.sizes.text_sm (or just the string \"sm\"), or pass your own gradio.themes.utils.Size object.", - "default": "Size()" - }, - { - "name": "spacing_size", - "annotation": "sizes.Size | str", - "doc": "The size of the spacing. Load a preset, like gradio.themes.sizes.spacing_sm (or just the string \"sm\"), or pass your own gradio.themes.utils.Size object.", - "default": "Size()" - }, - { - "name": "radius_size", - "annotation": "sizes.Size | str", - "doc": "The radius size of corners. Load a preset, like gradio.themes.sizes.radius_sm (or just the string \"sm\"), or pass your own gradio.themes.utils.Size object.", - "default": "Size()" - }, - { - "name": "font", - "annotation": "fonts.Font | str | Iterable[fonts.Font | str]", - "doc": "The primary font to use for the theme. Pass a string for a system font, or a gradio.themes.font.GoogleFont object to load a font from Google Fonts. Pass a list of fonts for fallbacks.", - "default": "(, 'ui-sans-serif', 'system-ui', 'sans-serif')" - }, - { - "name": "font_mono", - "annotation": "fonts.Font | str | Iterable[fonts.Font | str]", - "doc": "The monospace font to use for the theme, applies to code. Pass a string for a system font, or a gradio.themes.font.GoogleFont object to load a font from Google Fonts. Pass a list of fonts for fallbacks.", - "default": "(, 'ui-monospace', 'Consolas', 'monospace')" - } - ], - "returns": { "annotation": null }, - "example": "", - "fns": [ - { - "fn": null, - "name": "push_to_hub", - "description": "Upload a theme to the HuggingFace hub.
This requires a HuggingFace account.
", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "repo_name", - "annotation": "str", - "doc": "The name of the repository to store the theme assets, e.g. 'my_theme' or 'sunset'." - }, - { - "name": "org_name", - "annotation": "str | None", - "doc": "The name of the org to save the space in. If None (the default), the username corresponding to the logged in user, or h\u0192_token is used.", - "default": "None" - }, - { - "name": "version", - "annotation": "str | None", - "doc": "A semantic version tag for theme. Bumping the version tag lets you publish updates to a theme without changing the look of applications that already loaded your theme.", - "default": "None" - }, - { - "name": "hf_token", - "annotation": "str | None", - "doc": "API token for your HuggingFace account", - "default": "None" - }, - { - "name": "theme_name", - "annotation": "str | None", - "doc": "Name for the name. If None, defaults to repo_name", - "default": "None" - }, - { - "name": "description", - "annotation": "str | None", - "doc": "A long form description to your theme.", - "default": "None" - }, - { - "name": "private", - "annotation": "bool", - "doc": null, - "default": "False" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Base" - }, - { - "fn": null, - "name": "from_hub", - "description": "Load a theme from the hub.
This DOES NOT require a HuggingFace account for downloading publicly available themes.
", - "tags": {}, - "parameters": [ - { - "name": "repo_name", - "annotation": "str", - "doc": "string of the form /@. If a semantic version expression is omitted, the latest version will be fetched." - }, - { - "name": "hf_token", - "annotation": "str | None", - "doc": "HuggingFace Token. Only needed to download private themes.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Base" - }, - { - "fn": null, - "name": "load", - "description": "Load a theme from a json file.
", - "tags": {}, - "parameters": [ - { - "name": "path", - "annotation": "str", - "doc": "The filepath to read." - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Base" - }, - { - "fn": null, - "name": "dump", - "description": "Write the theme to a json file.
", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "filename", - "annotation": "str", - "doc": "The path to write the theme too" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Base" - }, - { - "fn": null, - "name": "from_dict", - "description": "Create a theme instance from a dictionary representation.
", - "tags": {}, - "parameters": [ - { - "name": "theme", - "annotation": "dict[str, dict[str, str]]", - "doc": "The dictionary representation of the theme." - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Base" - }, - { - "fn": null, - "name": "to_dict", - "description": "Convert the theme into a python dictionary.", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Base" - } - ], - "parent": "gradio" - }, - "queue": { - "class": null, - "name": "queue", - "description": "You can control the rate of processed requests by creating a queue. This will allow you to set the number of requests to be processed at one time, and will let users know their position in the queue.", - "tags": { - "parameters": "concurrency_count: Number of worker threads that will be processing requests from the queue concurrently. Increasing this number will increase the rate at which requests are processed, but will also increase the memory usage of the queue.
status_update_rate: If \"auto\", Queue will send status estimations to all clients whenever a job is finished. Otherwise Queue will send status at regular intervals set by this parameter as the number of seconds.
client_position_to_load_data: DEPRECATED. This parameter is deprecated and has no effect.
default_enabled: Deprecated and has no effect.
api_open: If True, the REST routes of the backend will be open, allowing requests made directly to those endpoints to skip the queue.
max_size: The maximum number of events the queue will store at any given moment. If the queue is full, new events will not be added and a user will receive a message saying that the queue is full. If None, the queue size will be unlimited.
with gr.Blocks() as demo:
button = gr.Button(label=\"Generate Image\")
button.click(fn=image_generator, inputs=gr.Textbox(), outputs=gr.Image())
demo.queue(concurrency_count=3)
demo.launch()
demo = gr.Interface(image_generator, gr.Textbox(), gr.Image())
demo.queue(concurrency_count=3)
demo.launch()" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "concurrency_count", - "annotation": "int", - "doc": "Number of worker threads that will be processing requests from the queue concurrently. Increasing this number will increase the rate at which requests are processed, but will also increase the memory usage of the queue.", - "default": "1" - }, - { - "name": "status_update_rate", - "annotation": "float | Literal['auto']", - "doc": "If \"auto\", Queue will send status estimations to all clients whenever a job is finished. Otherwise Queue will send status at regular intervals set by this parameter as the number of seconds.", - "default": "\"auto\"" - }, - { - "name": "client_position_to_load_data", - "annotation": "int | None", - "doc": "DEPRECATED. This parameter is deprecated and has no effect.", - "default": "None" - }, - { - "name": "default_enabled", - "annotation": "bool | None", - "doc": "Deprecated and has no effect.", - "default": "None" - }, - { - "name": "api_open", - "annotation": "bool", - "doc": "If True, the REST routes of the backend will be open, allowing requests made directly to those endpoints to skip the queue.", - "default": "True" - }, - { - "name": "max_size", - "annotation": "int | None", - "doc": "The maximum number of events the queue will store at any given moment. If the queue is full, new events will not be added and a user will receive a message saying that the queue is full. If None, the queue size will be unlimited.", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": "(\nI\nn\nt\ne\nr\nf\na\nc\ne\n)", - "fns": [], - "parent": "gradio" - }, - "blocks": { - "class": null, - "name": "Blocks", - "description": "Blocks is Gradio's low-level API that allows you to create more custom web applications and demos than Interfaces (yet still entirely in Python).

Compared to the Interface class, Blocks offers more flexibility and control over: (1) the layout of components, (2) the events that trigger the execution of functions, and (3) data flows (e.g. inputs can trigger outputs, which can trigger the next level of outputs). Blocks also offers ways to group together related demos such as with tabs.

The basic usage of Blocks is as follows: create a Blocks object, then use it as a context (with the \"with\" statement), and then define layouts, components, or events within the Blocks context. Finally, call the launch() method to launch the demo.
", - "tags": { - "demos": "blocks_hello, blocks_flipper, blocks_speech_text_sentiment, generate_english_german, sound_alert", - "guides": "blocks-and-event-listeners, controlling-layout, state-in-blocks, custom-CSS-and-JS, custom-interpretations-with-blocks, using-blocks-like-functions" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "theme", - "annotation": "Theme | str | None", - "doc": "a Theme object or a string representing a theme. If a string, will look for a built-in theme with that name (e.g. \"soft\" or \"default\"), or will attempt to load a theme from the HF Hub (e.g. \"gradio/monochrome\"). If None, will use the Default theme.", - "default": "None" - }, - { - "name": "analytics_enabled", - "annotation": "bool | None", - "doc": "whether to allow basic telemetry. If None, will use GRADIO_ANALYTICS_ENABLED environment variable or default to True.", - "default": "None" - }, - { - "name": "mode", - "annotation": "str", - "doc": "a human-friendly name for the kind of Blocks or Interface being created.", - "default": "\"blocks\"" - }, - { - "name": "title", - "annotation": "str", - "doc": "The tab title to display when this is opened in a browser window.", - "default": "\"Gradio\"" - }, - { - "name": "css", - "annotation": "str | None", - "doc": "custom css or path to custom css file to apply to entire Blocks", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": "import gradio as gr\ndef update(name):\n return f\"Welcome to Gradio, {name}!\"\n\nwith gr.Blocks() as demo:\n gr.Markdown(\"Start typing below and then click **Run** to see the output.\")\n with gr.Row():\n inp = gr.Textbox(placeholder=\"What is your name?\")\n out = gr.Textbox()\n btn = gr.Button(\"Run\")\n btn.click(fn=update, inputs=inp, outputs=out)\n\ndemo.launch()", - "fns": [ - { - "fn": null, - "name": "launch", - "description": "Launches a simple web server that serves the demo. Can also be used to create a public link used by anyone to access the demo from their browser by setting share=True.
", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "inline", - "annotation": "bool | None", - "doc": "whether to display in the interface inline in an iframe. Defaults to True in python notebooks; False otherwise.", - "default": "None" - }, - { - "name": "inbrowser", - "annotation": "bool", - "doc": "whether to automatically launch the interface in a new tab on the default browser.", - "default": "False" - }, - { - "name": "share", - "annotation": "bool | None", - "doc": "whether to create a publicly shareable link for the interface. Creates an SSH tunnel to make your UI accessible from anywhere. If not provided, it is set to False by default every time, except when running in Google Colab. When localhost is not accessible (e.g. Google Colab), setting share=False is not supported.", - "default": "None" - }, - { - "name": "debug", - "annotation": "bool", - "doc": "if True, blocks the main thread from running. If running in Google Colab, this is needed to print the errors in the cell output.", - "default": "False" - }, - { - "name": "enable_queue", - "annotation": "bool | None", - "doc": "DEPRECATED (use .queue() method instead.) if True, inference requests will be served through a queue instead of with parallel threads. Required for longer inference times (> 1min) to prevent timeout. The default option in HuggingFace Spaces is True. The default option elsewhere is False.", - "default": "None" - }, - { - "name": "max_threads", - "annotation": "int", - "doc": "the maximum number of total threads that the Gradio app can generate in parallel. The default is inherited from the starlette library (currently 40). Applies whether the queue is enabled or not. But if queuing is enabled, this parameter is increaseed to be at least the concurrency_count of the queue.", - "default": "40" - }, - { - "name": "auth", - "annotation": "Callable | tuple[str, str] | list[tuple[str, str]] | None", - "doc": "If provided, username and password (or list of username-password tuples) required to access interface. Can also provide function that takes username and password and returns True if valid login.", - "default": "None" - }, - { - "name": "auth_message", - "annotation": "str | None", - "doc": "If provided, HTML message provided on login page.", - "default": "None" - }, - { - "name": "prevent_thread_lock", - "annotation": "bool", - "doc": "If True, the interface will block the main thread while the server is running.", - "default": "False" - }, - { - "name": "show_error", - "annotation": "bool", - "doc": "If True, any errors in the interface will be displayed in an alert modal and printed in the browser console log", - "default": "False" - }, - { - "name": "server_name", - "annotation": "str | None", - "doc": "to make app accessible on local network, set this to \"0.0.0.0\". Can be set by environment variable GRADIO_SERVER_NAME. If None, will use \"127.0.0.1\".", - "default": "None" - }, - { - "name": "server_port", - "annotation": "int | None", - "doc": "will start gradio app on this port (if available). Can be set by environment variable GRADIO_SERVER_PORT. 
If None, will search for an available port starting at 7860.", - "default": "None" - }, - { - "name": "show_tips", - "annotation": "bool", - "doc": "if True, will occasionally show tips about new Gradio features", - "default": "False" - }, - { - "name": "height", - "annotation": "int", - "doc": "The height in pixels of the iframe element containing the interface (used if inline=True)", - "default": "500" - }, - { - "name": "width", - "annotation": "int | str", - "doc": "The width in pixels of the iframe element containing the interface (used if inline=True)", - "default": "\"100%\"" - }, - { - "name": "encrypt", - "annotation": "bool | None", - "doc": "DEPRECATED. Has no effect.", - "default": "None" - }, - { - "name": "favicon_path", - "annotation": "str | None", - "doc": "If a path to a file (.png, .gif, or .ico) is provided, it will be used as the favicon for the web page.", - "default": "None" - }, - { - "name": "ssl_keyfile", - "annotation": "str | None", - "doc": "If a path to a file is provided, will use this as the private key file to create a local server running on https.", - "default": "None" - }, - { - "name": "ssl_certfile", - "annotation": "str | None", - "doc": "If a path to a file is provided, will use this as the signed certificate for https. Needs to be provided if ssl_keyfile is provided.", - "default": "None" - }, - { - "name": "ssl_keyfile_password", - "annotation": "str | None", - "doc": "If a password is provided, will use this with the ssl certificate for https.", - "default": "None" - }, - { - "name": "ssl_verify", - "annotation": "bool", - "doc": "If False, skips certificate validation which allows self-signed certificates to be used.", - "default": "True" - }, - { - "name": "quiet", - "annotation": "bool", - "doc": "If True, suppresses most print statements.", - "default": "False" - }, - { - "name": "show_api", - "annotation": "bool", - "doc": "If True, shows the api docs in the footer of the app. Default True. If the queue is enabled, then api_open parameter of .queue() will determine if the api docs are shown, independent of the value of show_api.", - "default": "True" - }, - { - "name": "file_directories", - "annotation": "list[str] | None", - "doc": "This parameter has been renamed to `allowed_paths`. It will be removed in a future version.", - "default": "None" - }, - { - "name": "allowed_paths", - "annotation": "list[str] | None", - "doc": "List of complete filepaths or parent directories that gradio is allowed to serve (in addition to the directory containing the gradio python file). Must be absolute paths. Warning: if you provide directories, any files in these directories or their subdirectories are accessible to all users of your app.", - "default": "None" - }, - { - "name": "blocked_paths", - "annotation": "list[str] | None", - "doc": "List of complete filepaths or parent directories that gradio is not allowed to serve (i.e. users of your app are not allowed to access). Must be absolute paths. Warning: takes precedence over `allowed_paths` and all other directories exposed by Gradio by default.", - "default": "None" - }, - { - "name": "root_path", - "annotation": "str", - "doc": "The root path (or \"mount point\") of the application, if it's not served from the root (\"/\") of the domain. Often used when the application is behind a reverse proxy that forwards requests to the application. 
For example, if the application is served at \"https://example.com/myapp\", the `root_path` should be set to \"/myapp\".", - "default": "\"\"" - }, - { - "name": "app_kwargs", - "annotation": "dict[str, Any] | None", - "doc": "Additional keyword arguments to pass to the underlying FastAPI app as a dictionary of parameter keys and argument values. For example, `{\"docs_url\": \"/docs\"}`", - "default": "None" - } - ], - "returns": {}, - "example": "import gradio as gr\ndef reverse(text):\n return text[::-1]\nwith gr.Blocks() as demo:\n button = gr.Button(value=\"Reverse\")\n button.click(reverse, gr.Textbox(), gr.Textbox())\ndemo.launch(share=True, auth=(\"username\", \"password\"))", - "override_signature": null, - "parent": "gradio.Blocks" - }, - { - "fn": null, - "name": "queue", - "description": "You can control the rate of processed requests by creating a queue. This will allow you to set the number of requests to be processed at one time, and will let users know their position in the queue.", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "concurrency_count", - "annotation": "int", - "doc": "Number of worker threads that will be processing requests from the queue concurrently. Increasing this number will increase the rate at which requests are processed, but will also increase the memory usage of the queue.", - "default": "1" - }, - { - "name": "status_update_rate", - "annotation": "float | Literal['auto']", - "doc": "If \"auto\", Queue will send status estimations to all clients whenever a job is finished. Otherwise Queue will send status at regular intervals set by this parameter as the number of seconds.", - "default": "\"auto\"" - }, - { - "name": "client_position_to_load_data", - "annotation": "int | None", - "doc": "DEPRECATED. This parameter is deprecated and has no effect.", - "default": "None" - }, - { - "name": "default_enabled", - "annotation": "bool | None", - "doc": "Deprecated and has no effect.", - "default": "None" - }, - { - "name": "api_open", - "annotation": "bool", - "doc": "If True, the REST routes of the backend will be open, allowing requests made directly to those endpoints to skip the queue.", - "default": "True" - }, - { - "name": "max_size", - "annotation": "int | None", - "doc": "The maximum number of events the queue will store at any given moment. If the queue is full, new events will not be added and a user will receive a message saying that the queue is full. If None, the queue size will be unlimited.", - "default": "None" - } - ], - "returns": {}, - "example": "with gr.Blocks() as demo:\n button = gr.Button(label=\"Generate Image\")\n button.click(fn=image_generator, inputs=gr.Textbox(), outputs=gr.Image())\ndemo.queue(concurrency_count=3)\ndemo.launch()", - "override_signature": null, - "parent": "gradio.Blocks" - }, - { - "fn": null, - "name": "integrate", - "description": "A catch-all method for integrating with other libraries. 
This method should be run after launch()", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "comet_ml", - "annotation": "", - "doc": "If a comet_ml Experiment object is provided, will integrate with the experiment and appear on Comet dashboard", - "default": "None" - }, - { - "name": "wandb", - "annotation": "ModuleType | None", - "doc": "If the wandb module is provided, will integrate with it and appear on WandB dashboard", - "default": "None" - }, - { - "name": "mlflow", - "annotation": "ModuleType | None", - "doc": "If the mlflow module is provided, will integrate with the experiment and appear on ML Flow dashboard", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Blocks" - }, - { - "fn": null, - "name": "load", - "description": "For reverse compatibility reasons, this is both a class method and an instance method, the two of which, confusingly, do two completely different things.

Class method: loads a demo from a Hugging Face Spaces repo and creates it locally and returns a block instance. Warning: this method will be deprecated. Use the equivalent `gradio.load()` instead.

Instance method: adds event that runs as soon as the demo loads in the browser. Example usage below.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "Instance Method - the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.", - "default": "None" - }, - { - "name": "inputs", - "annotation": "list[Component] | None", - "doc": "Instance Method - List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "list[Component] | None", - "doc": "Instance Method - List of gradio.components to use as inputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Instance Method - Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "Instance Method - If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "str", - "doc": "Instance Method - If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "", - "doc": "Instance Method - If True, will place the request on the queue, if the queue exists", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "Instance Method - If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Instance Method - Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "Instance Method - If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "Instance Method - If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Instance Method - Run this event 'every' number of seconds. Interpreted in seconds. Queue must be enabled.", - "default": "None" - }, - { - "name": "name", - "annotation": "str | None", - "doc": "Class Method - the name of the model (e.g. \"gpt2\" or \"facebook/bart-base\") or space (e.g. 
\"flax-community/spanish-gpt2\"), can include the `src` as prefix (e.g. \"models/facebook/bart-base\")", - "default": "None" - }, - { - "name": "src", - "annotation": "str | None", - "doc": "Class Method - the source of the model: `models` or `spaces` (or leave empty if source is provided as a prefix in `name`)", - "default": "None" - }, - { - "name": "api_key", - "annotation": "str | None", - "doc": "Class Method - optional access token for loading private Hugging Face Hub models or spaces. Find your token here: https://huggingface.co/settings/tokens. Warning: only provide this if you are loading a trusted private Space as it can be read by the Space you are loading.", - "default": "None" - }, - { - "name": "alias", - "annotation": "str | None", - "doc": "Class Method - optional string used as the name of the loaded model instead of the default name (only applies if loading a Space running Gradio 2.x)", - "default": "None" - } - ], - "returns": {}, - "example": "import gradio as gr\nimport datetime\nwith gr.Blocks() as demo:\n def get_time():\n return datetime.datetime.now().time()\n dt = gr.Textbox(label=\"Current time\")\n demo.load(get_time, inputs=None, outputs=dt)\ndemo.launch()", - "override_signature": null, - "parent": "gradio.Blocks" - } - ], - "demos": [ - [ - "blocks_hello", - "import gradio as gr\n\ndef welcome(name):\n return f\"Welcome to Gradio, {name}!\"\n\nwith gr.Blocks() as demo:\n gr.Markdown(\n \"\"\"\n # Hello World!\n Start typing below to see the output.\n \"\"\")\n inp = gr.Textbox(placeholder=\"What is your name?\")\n out = gr.Textbox()\n inp.change(welcome, inp, out)\n\nif __name__ == \"__main__\":\n demo.launch()" - ], - [ - "blocks_flipper", - "import numpy as np\nimport gradio as gr\n\n\ndef flip_text(x):\n return x[::-1]\n\n\ndef flip_image(x):\n return np.fliplr(x)\n\n\nwith gr.Blocks() as demo:\n gr.Markdown(\"Flip text or image files using this demo.\")\n with gr.Tab(\"Flip Text\"):\n text_input = gr.Textbox()\n text_output = gr.Textbox()\n text_button = gr.Button(\"Flip\")\n with gr.Tab(\"Flip Image\"):\n with gr.Row():\n image_input = gr.Image()\n image_output = gr.Image()\n image_button = gr.Button(\"Flip\")\n\n with gr.Accordion(\"Open for More!\"):\n gr.Markdown(\"Look at me...\")\n\n text_button.click(flip_text, inputs=text_input, outputs=text_output)\n image_button.click(flip_image, inputs=image_input, outputs=image_output)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ], - [ - "blocks_speech_text_sentiment", - "from transformers import pipeline\n\nimport gradio as gr\n\nasr = pipeline(\"automatic-speech-recognition\", \"facebook/wav2vec2-base-960h\")\nclassifier = pipeline(\"text-classification\")\n\n\ndef speech_to_text(speech):\n text = asr(speech)[\"text\"]\n return text\n\n\ndef text_to_sentiment(text):\n return classifier(text)[0][\"label\"]\n\n\ndemo = gr.Blocks()\n\nwith demo:\n audio_file = gr.Audio(type=\"filepath\")\n text = gr.Textbox()\n label = gr.Label()\n\n b1 = gr.Button(\"Recognize Speech\")\n b2 = gr.Button(\"Classify Sentiment\")\n\n b1.click(speech_to_text, inputs=audio_file, outputs=text)\n b2.click(text_to_sentiment, inputs=text, outputs=label)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ], - [ - "generate_english_german", - "import gradio as gr\n\nfrom transformers import pipeline\n\nenglish_translator = gr.Blocks.load(name=\"spaces/gradio/english_translator\")\nenglish_generator = pipeline(\"text-generation\", model=\"distilgpt2\")\n\n\ndef generate_text(text):\n english_text = 
english_generator(text)[0][\"generated_text\"]\n german_text = english_translator(english_text)\n return english_text, german_text\n\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n seed = gr.Text(label=\"Input Phrase\")\n with gr.Column():\n english = gr.Text(label=\"Generated English Text\")\n german = gr.Text(label=\"Generated German Text\")\n btn = gr.Button(\"Generate\")\n btn.click(generate_text, inputs=[seed], outputs=[english, german])\n gr.Examples([\"My name is Clara and I am\"], inputs=[seed])\n\nif __name__ == \"__main__\":\n demo.launch()" - ], - [ - "sound_alert", - "import time\nimport gradio as gr\n\n\njs_function = \"() => {new Audio('file=beep.mp3').play();}\"\n\ndef task(x):\n time.sleep(2)\n return \"Hello, \" + x \n\nwith gr.Blocks() as demo:\n name = gr.Textbox(label=\"name\")\n greeting = gr.Textbox(label=\"greeting\")\n name.blur(task, name, greeting)\n greeting.change(None, [], [], _js=js_function) # Note that _js is a special argument whose usage may change in the future\n \ndemo.launch()" - ] - ], - "guides": [ - { - "name": "blocks-and-event-listeners", - "category": "building-with-blocks", - "pretty_category": "Building With Blocks", - "guide_index": 1, - "absolute_index": 8, - "pretty_name": "Blocks And Event Listeners", - "content": "# Blocks and Event Listeners\n\nWe took a quick look at Blocks in the [Quickstart](https://gradio.app/guides/quickstart/#blocks-more-flexibility-and-control). Let's dive deeper. This guide will cover the how Blocks are structured, event listeners and their types, running events continuously, updating configurations, and using dictionaries vs lists. \n\n## Blocks Structure\n\nTake a look at the demo below.\n\n```python\nimport gradio as gr\n\ndef greet(name):\n return \"Hello \" + name + \"!\"\n\nwith gr.Blocks() as demo:\n name = gr.Textbox(label=\"Name\")\n output = gr.Textbox(label=\"Output Box\")\n greet_btn = gr.Button(\"Greet\")\n greet_btn.click(fn=greet, inputs=name, outputs=output, api_name=\"greet\")\n \n\ndemo.launch()\n```\n\n\n- First, note the `with gr.Blocks() as demo:` clause. The Blocks app code will be contained within this clause.\n- Next come the Components. These are the same Components used in `Interface`. However, instead of being passed to some constructor, Components are automatically added to the Blocks as they are created within the `with` clause.\n- Finally, the `click()` event listener. Event listeners define the data flow within the app. In the example above, the listener ties the two Textboxes together. The Textbox `name` acts as the input and Textbox `output` acts as the output to the `greet` method. This dataflow is triggered when the Button `greet_btn` is clicked. Like an Interface, an event listener can take multiple inputs or outputs.\n\n## Event Listeners and Interactivity\n\nIn the example above, you'll notice that you are able to edit Textbox `name`, but not Textbox `output`. This is because any Component that acts as an input to an event listener is made interactive. However, since Textbox `output` acts only as an output, Gradio determines that it should not be made interactive. You can override the default behavior and directly configure the interactivity of a Component with the boolean `interactive` keyword argument. \n\n```python\noutput = gr.Textbox(label=\"Output\", interactive=True)\n```\n\n_Note_: What happens if a Gradio component is neither an input nor an output? 
If a component is constructed with a default value, then it is presumed to be displaying content and is rendered non-interactive. Otherwise, it is rendered interactive. Again, this behavior can be overridden by specifying a value for the `interactive` argument.\n\n## Types of Event Listeners\n\nTake a look at the demo below:\n\n```python\nimport gradio as gr\n\ndef welcome(name):\n return f\"Welcome to Gradio, {name}!\"\n\nwith gr.Blocks() as demo:\n gr.Markdown(\n \"\"\"\n # Hello World!\n Start typing below to see the output.\n \"\"\")\n inp = gr.Textbox(placeholder=\"What is your name?\")\n out = gr.Textbox()\n inp.change(welcome, inp, out)\n\ndemo.launch()\n```\n\n\nInstead of being triggered by a click, the `welcome` function is triggered by typing in the Textbox `inp`. This is due to the `change()` event listener. Different Components support different event listeners. For example, the `Video` Component supports a `play()` event listener, triggered when a user presses play. See the [Docs](http://gradio.app/docs#components) for the event listeners for each Component.\n\n## Multiple Data Flows\n\nA Blocks app is not limited to a single data flow the way Interfaces are. Take a look at the demo below:\n\n```python\nimport gradio as gr\n\ndef increase(num):\n return num + 1\n\nwith gr.Blocks() as demo:\n a = gr.Number(label=\"a\")\n b = gr.Number(label=\"b\")\n btoa = gr.Button(\"a > b\")\n atob = gr.Button(\"b > a\")\n atob.click(increase, a, b)\n btoa.click(increase, b, a)\n\ndemo.launch()\n```\n\n\nNote that `num1` can act as input to `num2`, and also vice-versa! As your apps get more complex, you will have many data flows connecting various Components. \n\nHere's an example of a \"multi-step\" demo, where the output of one model (a speech-to-text model) gets fed into the next model (a sentiment classifier).\n\n```python\nfrom transformers import pipeline\n\nimport gradio as gr\n\nasr = pipeline(\"automatic-speech-recognition\", \"facebook/wav2vec2-base-960h\")\nclassifier = pipeline(\"text-classification\")\n\n\ndef speech_to_text(speech):\n text = asr(speech)[\"text\"]\n return text\n\n\ndef text_to_sentiment(text):\n return classifier(text)[0][\"label\"]\n\n\ndemo = gr.Blocks()\n\nwith demo:\n audio_file = gr.Audio(type=\"filepath\")\n text = gr.Textbox()\n label = gr.Label()\n\n b1 = gr.Button(\"Recognize Speech\")\n b2 = gr.Button(\"Classify Sentiment\")\n\n b1.click(speech_to_text, inputs=audio_file, outputs=text)\n b2.click(text_to_sentiment, inputs=text, outputs=label)\n\ndemo.launch()\n\n```\n\n\n## Function Input List vs Dict\n\nThe event listeners you've seen so far have a single input component. If you'd like to have multiple input components pass data to the function, you have two options on how the function can accept input component values:\n\n1. as a list of arguments, or\n2. as a single dictionary of values, keyed by the component\n\nLet's see an example of each:\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n a = gr.Number(label=\"a\")\n b = gr.Number(label=\"b\")\n with gr.Row():\n add_btn = gr.Button(\"Add\")\n sub_btn = gr.Button(\"Subtract\")\n c = gr.Number(label=\"sum\")\n\n def add(num1, num2):\n return num1 + num2\n add_btn.click(add, inputs=[a, b], outputs=c)\n\n def sub(data):\n return data[a] - data[b]\n sub_btn.click(sub, inputs={a, b}, outputs=c)\n\n\ndemo.launch()\n```\n\nBoth `add()` and `sub()` take `a` and `b` as inputs. However, the syntax is different between these listeners. \n\n1. 
To the `add_btn` listener, we pass the inputs as a list. The function `add()` takes each of these inputs as arguments. The value of `a` maps to the argument `num1`, and the value of `b` maps to the argument `num2`.\n2. To the `sub_btn` listener, we pass the inputs as a set (note the curly brackets!). The function `sub()` takes a single dictionary argument `data`, where the keys are the input components, and the values are the values of those components.\n\nIt is a matter of preference which syntax you prefer! For functions with many input components, option 2 may be easier to manage.\n\n\n\n## Function Return List vs Dict\n\nSimilarly, you may return values for multiple output components either as:\n\n1. a list of values, or\n2. a dictionary keyed by the component\n\nLet's first see an example of (1), where we set the values of two output components by returning two values:\n\n```python\nwith gr.Blocks() as demo:\n food_box = gr.Number(value=10, label=\"Food Count\")\n status_box = gr.Textbox()\n def eat(food):\n if food > 0:\n return food - 1, \"full\"\n else:\n return 0, \"hungry\"\n gr.Button(\"EAT\").click(\n fn=eat, \n inputs=food_box,\n outputs=[food_box, status_box]\n )\n```\n\nAbove, each return statement returns two values corresponding to `food_box` and `status_box`, respectively.\n\nInstead of returning a list of values corresponding to each output component in order, you can also return a dictionary, with the key corresponding to the output component and the value as the new value. This also allows you to skip updating some output components. \n\n```python\nwith gr.Blocks() as demo:\n food_box = gr.Number(value=10, label=\"Food Count\")\n status_box = gr.Textbox()\n def eat(food):\n if food > 0:\n return {food_box: food - 1, status_box: \"full\"}\n else:\n return {status_box: \"hungry\"}\n gr.Button(\"EAT\").click(\n fn=eat, \n inputs=food_box,\n outputs=[food_box, status_box]\n )\n```\n\nNotice how when there is no food, we only update the `status_box` element. We skipped updating the `food_box` component.\n\nDictionary returns are helpful when an event listener affects many components on return, or conditionally affects outputs and not others.\n\nKeep in mind that with dictionary returns, we still need to specify the possible outputs in the event listener.\n\n## Updating Component Configurations\n\nThe return value of an event listener function is usually the updated value of the corresponding output Component. Sometimes we want to update the configuration of the Component as well, such as the visibility. In this case, we return a `gr.update()` object instead of just the update Component value.\n\n```python\nimport gradio as gr\n\ndef change_textbox(choice):\n if choice == \"short\":\n return gr.update(lines=2, visible=True, value=\"Short story: \")\n elif choice == \"long\":\n return gr.update(lines=8, visible=True, value=\"Long story...\")\n else:\n return gr.update(visible=False)\n\nwith gr.Blocks() as demo:\n radio = gr.Radio(\n [\"short\", \"long\", \"none\"], label=\"Essay Length to Write?\"\n )\n text = gr.Textbox(lines=2, interactive=True)\n radio.change(fn=change_textbox, inputs=radio, outputs=text)\n\ndemo.launch()\n```\n\n\nSee how we can configure the Textbox itself through the `gr.update()` method. The `value=` argument can still be used to update the value along with Component configuration.\n\n## Running Events Consecutively\n\nYou can also run events consecutively by using the `then` method of an event listener. 
This will run an event after the previous event has finished running. This is useful for running events that update components in multiple steps. \n\nFor example, in the chatbot example below, we first update the chatbot with the user message immediately, and then update the chatbot with the computer response after a simulated delay.\n\n```python\nimport gradio as gr\nimport random\nimport time\n\nwith gr.Blocks() as demo:\n chatbot = gr.Chatbot()\n msg = gr.Textbox()\n clear = gr.Button(\"Clear\")\n\n def user(user_message, history):\n return \"\", history + [[user_message, None]]\n\n def bot(history):\n bot_message = random.choice([\"How are you?\", \"I love you\", \"I'm very hungry\"])\n time.sleep(2)\n history[-1][1] = bot_message\n return history\n\n msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(\n bot, chatbot, chatbot\n )\n clear.click(lambda: None, None, chatbot, queue=False)\n \ndemo.queue()\ndemo.launch()\n\n```\n\n\nThe `.then()` method of an event listener executes the subsequent event regardless of whether the previous event raised any errors. If you'd like to only run subsequent events if the previous event executed successfully, use the `.success()` method, which takes the same arguments as `.then()`.\n\n## Running Events Continuously\n\nYou can run events on a fixed schedule using the `every` parameter of the event listener. This will run the event\n`every` number of seconds while the client connection is open. If the connection is closed, the event will stop running after the following iteration.\nNote that this does not take into account the runtime of the event itself. So a function\nwith a 1 second runtime running with `every=5`, would actually run every 6 seconds.\n\nHere is an example of a sine curve that updates every second!\n\n```python\nimport math\nimport gradio as gr\nimport plotly.express as px\nimport numpy as np\n\n\nplot_end = 2 * math.pi\n\n\ndef get_plot(period=1):\n global plot_end\n x = np.arange(plot_end - 2 * math.pi, plot_end, 0.02)\n y = np.sin(2*math.pi*period * x)\n fig = px.line(x=x, y=y)\n plot_end += 2 * math.pi\n if plot_end > 1000:\n plot_end = 2 * math.pi\n return fig\n\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n gr.Markdown(\"Change the value of the slider to automatically update the plot\")\n period = gr.Slider(label=\"Period of plot\", value=1, minimum=0, maximum=10, step=1)\n plot = gr.Plot(label=\"Plot (updates every half second)\")\n\n dep = demo.load(get_plot, None, plot, every=1)\n period.change(get_plot, period, plot, every=1, cancels=[dep])\n\n\nif __name__ == \"__main__\":\n demo.queue().launch()\n\n```\n\n\n## Gathering Event Data\n\nYou can gather specific data about an event by adding the associated event data class as a type hint to an argument in the event listener function. \n\nFor example, event data for `.select()` can be type hinted by a `gradio.SelectData` argument. This event is triggered when a user selects some part of the triggering component, and the event data includes information about what the user specifically selected. If a user selected a specific word in a `Textbox`, a specific image in a `Gallery`, or a specific cell in a `DataFrame`, the event data argument would contain information about the specific selection.\n\nIn the 2 player tic-tac-toe demo below, a user can select a cell in the `DataFrame` to make a move. The event data argument contains information about the specific cell that was selected. 
We can first check to see if the cell is empty, and then update the cell with the user's move. \n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n turn = gr.Textbox(\"X\", interactive=False, label=\"Turn\")\n board = gr.Dataframe(value=[[\"\", \"\", \"\"]] * 3, interactive=False, type=\"array\")\n\n def place(board, turn, evt: gr.SelectData):\n if evt.value:\n return board, turn\n board[evt.index[0]][evt.index[1]] = turn\n turn = \"O\" if turn == \"X\" else \"X\"\n return board, turn\n\n board.select(place, [board, turn], [board, turn])\n\ndemo.launch()\n```\n", - "html": "

Blocks and Event Listeners

\n\n

We took a quick look at Blocks in the Quickstart. Let's dive deeper. This guide will cover how Blocks are structured, event listeners and their types, running events continuously, updating configurations, and using dictionaries vs. lists.

\n\n

Blocks Structure

\n\n

Take a look at the demo below.

\n\n
import gradio as gr\n\ndef greet(name):\n    return \"Hello \" + name + \"!\"\n\nwith gr.Blocks() as demo:\n    name = gr.Textbox(label=\"Name\")\n    output = gr.Textbox(label=\"Output Box\")\n    greet_btn = gr.Button(\"Greet\")\n    greet_btn.click(fn=greet, inputs=name, outputs=output, api_name=\"greet\")\n\n\ndemo.launch()\n
\n\n

\n\n
    \n
  • First, note the with gr.Blocks() as demo: clause. The Blocks app code will be contained within this clause.
  • \n
  • Next come the Components. These are the same Components used in Interface. However, instead of being passed to some constructor, Components are automatically added to the Blocks as they are created within the with clause.
  • \n
  • Finally, the click() event listener. Event listeners define the data flow within the app. In the example above, the listener ties the two Textboxes together. The Textbox name acts as the input and Textbox output acts as the output to the greet method. This dataflow is triggered when the Button greet_btn is clicked. Like an Interface, an event listener can take multiple inputs or outputs.
  • \n
\n\n

Event Listeners and Interactivity

\n\n

In the example above, you'll notice that you are able to edit Textbox name, but not Textbox output. This is because any Component that acts as an input to an event listener is made interactive. However, since Textbox output acts only as an output, Gradio determines that it should not be made interactive. You can override the default behavior and directly configure the interactivity of a Component with the boolean interactive keyword argument.

\n\n
output = gr.Textbox(label=\"Output\", interactive=True)\n
\n\n

Note: What happens if a Gradio component is neither an input nor an output? If a component is constructed with a default value, then it is presumed to be displaying content and is rendered non-interactive. Otherwise, it is rendered interactive. Again, this behavior can be overridden by specifying a value for the interactive argument.

\n\n
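
As a quick illustration of that rule:

\n\n
display_only = gr.Textbox(value=\"I have a default value, so I render as static content\")\neditable = gr.Textbox(value=\"Same, but explicitly made editable\", interactive=True)\n
\n\n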

Types of Event Listeners

\n\n

Take a look at the demo below:

\n\n
import gradio as gr\n\ndef welcome(name):\n    return f\"Welcome to Gradio, {name}!\"\n\nwith gr.Blocks() as demo:\n    gr.Markdown(\n    \"\"\"\n    # Hello World!\n    Start typing below to see the output.\n    \"\"\")\n    inp = gr.Textbox(placeholder=\"What is your name?\")\n    out = gr.Textbox()\n    inp.change(welcome, inp, out)\n\ndemo.launch()\n
\n\n

\n\n

Instead of being triggered by a click, the welcome function is triggered by typing in the Textbox inp. This is due to the change() event listener. Different Components support different event listeners. For example, the Video Component supports a play() event listener, triggered when a user presses play. See the Docs for the event listeners for each Component.

\n\n
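
For instance, a play() listener is wired up the same way as change() or click(); in this sketch, \"example.mp4\" is only a placeholder path:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    video = gr.Video(\"example.mp4\")  # placeholder path\n    status = gr.Textbox(label=\"Status\")\n    # play() fires when the user presses play on the video\n    video.play(lambda: \"Playing!\", None, status)\n\ndemo.launch()\n
\n\n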

Multiple Data Flows

\n\n

A Blocks app is not limited to a single data flow the way Interfaces are. Take a look at the demo below:

\n\n
import gradio as gr\n\ndef increase(num):\n    return num + 1\n\nwith gr.Blocks() as demo:\n    a = gr.Number(label=\"a\")\n    b = gr.Number(label=\"b\")\n    btoa = gr.Button(\"a > b\")\n    atob = gr.Button(\"b > a\")\n    atob.click(increase, a, b)\n    btoa.click(increase, b, a)\n\ndemo.launch()\n
\n\n

\n\n

Note that a can act as input to b, and also vice-versa! As your apps get more complex, you will have many data flows connecting various Components.

\n\n

Here's an example of a \"multi-step\" demo, where the output of one model (a speech-to-text model) gets fed into the next model (a sentiment classifier).

\n\n
from transformers import pipeline\n\nimport gradio as gr\n\nasr = pipeline(\"automatic-speech-recognition\", \"facebook/wav2vec2-base-960h\")\nclassifier = pipeline(\"text-classification\")\n\n\ndef speech_to_text(speech):\n    text = asr(speech)[\"text\"]\n    return text\n\n\ndef text_to_sentiment(text):\n    return classifier(text)[0][\"label\"]\n\n\ndemo = gr.Blocks()\n\nwith demo:\n    audio_file = gr.Audio(type=\"filepath\")\n    text = gr.Textbox()\n    label = gr.Label()\n\n    b1 = gr.Button(\"Recognize Speech\")\n    b2 = gr.Button(\"Classify Sentiment\")\n\n    b1.click(speech_to_text, inputs=audio_file, outputs=text)\n    b2.click(text_to_sentiment, inputs=text, outputs=label)\n\ndemo.launch()\n\n
\n\n

\n\n

Function Input List vs Dict

\n\n

The event listeners you've seen so far have a single input component. If you'd like to have multiple input components pass data to the function, you have two options on how the function can accept input component values:

\n\n
    \n
  1. as a list of arguments, or
  2. \n
  3. as a single dictionary of values, keyed by the component
  4. \n
\n\n

Let's see an example of each:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    a = gr.Number(label=\"a\")\n    b = gr.Number(label=\"b\")\n    with gr.Row():\n        add_btn = gr.Button(\"Add\")\n        sub_btn = gr.Button(\"Subtract\")\n    c = gr.Number(label=\"sum\")\n\n    def add(num1, num2):\n        return num1 + num2\n    add_btn.click(add, inputs=[a, b], outputs=c)\n\n    def sub(data):\n        return data[a] - data[b]\n    sub_btn.click(sub, inputs={a, b}, outputs=c)\n\n\ndemo.launch()\n
\n\n

Both add() and sub() take a and b as inputs. However, the syntax is different between these listeners.

\n\n
    \n
  1. To the add_btn listener, we pass the inputs as a list. The function add() takes each of these inputs as arguments. The value of a maps to the argument num1, and the value of b maps to the argument num2.
  2. \n
  3. To the sub_btn listener, we pass the inputs as a set (note the curly brackets!). The function sub() takes a single dictionary argument data, where the keys are the input components, and the values are the values of those components.
  4. \n
\n\n

Which syntax you use is a matter of preference! For functions with many input components, option 2 may be easier to manage.

\n\n

\n\n

Function Return List vs Dict

\n\n

Similarly, you may return values for multiple output components either as:

\n\n
    \n
  1. a list of values, or
  2. \n
  3. a dictionary keyed by the component
  4. \n
\n\n

Let's first see an example of (1), where we set the values of two output components by returning two values:

\n\n
with gr.Blocks() as demo:\n    food_box = gr.Number(value=10, label=\"Food Count\")\n    status_box = gr.Textbox()\n    def eat(food):\n        if food > 0:\n            return food - 1, \"full\"\n        else:\n            return 0, \"hungry\"\n    gr.Button(\"EAT\").click(\n        fn=eat, \n        inputs=food_box,\n        outputs=[food_box, status_box]\n    )\n
\n\n

Above, each return statement returns two values corresponding to food_box and status_box, respectively.

\n\n

Instead of returning a list of values corresponding to each output component in order, you can also return a dictionary, with the key corresponding to the output component and the value as the new value. This also allows you to skip updating some output components.

\n\n
with gr.Blocks() as demo:\n    food_box = gr.Number(value=10, label=\"Food Count\")\n    status_box = gr.Textbox()\n    def eat(food):\n        if food > 0:\n            return {food_box: food - 1, status_box: \"full\"}\n        else:\n            return {status_box: \"hungry\"}\n    gr.Button(\"EAT\").click(\n        fn=eat, \n        inputs=food_box,\n        outputs=[food_box, status_box]\n    )\n
\n\n

Notice how when there is no food, we only update the status_box element. We skipped updating the food_box component.

\n\n

Dictionary returns are helpful when an event listener affects many components on return, or conditionally affects some outputs and not others.

\n\n

Keep in mind that with dictionary returns, we still need to specify the possible outputs in the event listener.

\n\n

Updating Component Configurations

\n\n

The return value of an event listener function is usually the updated value of the corresponding output Component. Sometimes we want to update the configuration of the Component as well, such as the visibility. In this case, we return a gr.update() object instead of just the updated Component value.

\n\n
import gradio as gr\n\ndef change_textbox(choice):\n    if choice == \"short\":\n        return gr.update(lines=2, visible=True, value=\"Short story: \")\n    elif choice == \"long\":\n        return gr.update(lines=8, visible=True, value=\"Long story...\")\n    else:\n        return gr.update(visible=False)\n\nwith gr.Blocks() as demo:\n    radio = gr.Radio(\n        [\"short\", \"long\", \"none\"], label=\"Essay Length to Write?\"\n    )\n    text = gr.Textbox(lines=2, interactive=True)\n    radio.change(fn=change_textbox, inputs=radio, outputs=text)\n\ndemo.launch()\n
\n\n

\n\n

See how we can configure the Textbox itself through the gr.update() method. The value= argument can still be used to update the value along with Component configuration.

\n\n

Running Events Consecutively

\n\n

You can also run events consecutively by using the then method of an event listener. This will run an event after the previous event has finished running. This is useful for running events that update components in multiple steps.

\n\n

For example, in the chatbot example below, we first update the chatbot with the user message immediately, and then update the chatbot with the computer response after a simulated delay.

\n\n
import gradio as gr\nimport random\nimport time\n\nwith gr.Blocks() as demo:\n    chatbot = gr.Chatbot()\n    msg = gr.Textbox()\n    clear = gr.Button(\"Clear\")\n\n    def user(user_message, history):\n        return \"\", history + [[user_message, None]]\n\n    def bot(history):\n        bot_message = random.choice([\"How are you?\", \"I love you\", \"I'm very hungry\"])\n        time.sleep(2)\n        history[-1][1] = bot_message\n        return history\n\n    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(\n        bot, chatbot, chatbot\n    )\n    clear.click(lambda: None, None, chatbot, queue=False)\n\ndemo.queue()\ndemo.launch()\n\n
\n\n

\n\n

The .then() method of an event listener executes the subsequent event regardless of whether the previous event raised any errors. If you'd like to only run subsequent events if the previous event executed successfully, use the .success() method, which takes the same arguments as .then().

\n\n
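
For example, here is a minimal sketch (reusing the user and bot functions from the chatbot demo above, purely for illustration) in which the bot step only runs if the user step completed without raising an error:

\n\n
msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).success(\n    bot, chatbot, chatbot\n)\n
\n\n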

Running Events Continuously

\n\n

You can run events on a fixed schedule using the every parameter of the event listener. This will re-run the event\nevery given number of seconds while the client connection is open. If the connection is closed, the event will stop running after the following iteration.\nNote that the interval does not take into account the runtime of the event itself, so a function\nwith a 1 second runtime running with every=5 would actually run every 6 seconds.

\n\n

Here is an example of a sine curve that updates every second!

\n\n
import math\nimport gradio as gr\nimport plotly.express as px\nimport numpy as np\n\n\nplot_end = 2 * math.pi\n\n\ndef get_plot(period=1):\n    global plot_end\n    x = np.arange(plot_end - 2 * math.pi, plot_end, 0.02)\n    y = np.sin(2*math.pi*period * x)\n    fig = px.line(x=x, y=y)\n    plot_end += 2 * math.pi\n    if plot_end > 1000:\n        plot_end = 2 * math.pi\n    return fig\n\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        with gr.Column():\n            gr.Markdown(\"Change the value of the slider to automatically update the plot\")\n            period = gr.Slider(label=\"Period of plot\", value=1, minimum=0, maximum=10, step=1)\n            plot = gr.Plot(label=\"Plot (updates every half second)\")\n\n    dep = demo.load(get_plot, None, plot, every=1)\n    period.change(get_plot, period, plot, every=1, cancels=[dep])\n\n\nif __name__ == \"__main__\":\n    demo.queue().launch()\n\n
\n\n

\n\n

Gathering Event Data

\n\n

You can gather specific data about an event by adding the associated event data class as a type hint to an argument in the event listener function.

\n\n

For example, event data for .select() can be type hinted by a gradio.SelectData argument. This event is triggered when a user selects some part of the triggering component, and the event data includes information about what the user specifically selected. If a user selected a specific word in a Textbox, a specific image in a Gallery, or a specific cell in a DataFrame, the event data argument would contain information about the specific selection.

\n\n
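
As a minimal sketch of the mechanism (the Gallery contents and component names here are just illustrative), annotating an argument with gr.SelectData is all that is needed for Gradio to pass in the event data:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    gallery = gr.Gallery([\"images/cheetah.jpg\"])  # illustrative image path\n    selection = gr.Textbox(label=\"Selection\")\n\n    def on_select(evt: gr.SelectData):\n        # evt.index and evt.value describe what the user clicked on\n        return f\"You selected {evt.value} at index {evt.index}\"\n\n    gallery.select(on_select, None, selection)\n\ndemo.launch()\n
\n\n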

In the 2 player tic-tac-toe demo below, a user can select a cell in the DataFrame to make a move. The event data argument contains information about the specific cell that was selected. We can first check to see if the cell is empty, and then update the cell with the user's move.

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    turn = gr.Textbox(\"X\", interactive=False, label=\"Turn\")\n    board = gr.Dataframe(value=[[\"\", \"\", \"\"]] * 3, interactive=False, type=\"array\")\n\n    def place(board, turn, evt: gr.SelectData):\n        if evt.value:\n            return board, turn\n        board[evt.index[0]][evt.index[1]] = turn\n        turn = \"O\" if turn == \"X\" else \"X\"\n        return board, turn\n\n    board.select(place, [board, turn], [board, turn])\n\ndemo.launch()\n
\n\n

\n", - "tags": [], - "spaces": [], - "url": "/guides/blocks-and-event-listeners/", - "contributor": null - }, - { - "name": "controlling-layout", - "category": "building-with-blocks", - "pretty_category": "Building With Blocks", - "guide_index": 2, - "absolute_index": 9, - "pretty_name": "Controlling Layout", - "content": "# Controlling Layout\n\nBy default, Components in Blocks are arranged vertically. Let's take a look at how we can rearrange Components. Under the hood, this layout structure uses the [flexbox model of web development](https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Flexible_Box_Layout/Basic_Concepts_of_Flexbox).\n\n## Rows\n\nElements within a `with gr.Row` clause will all be displayed horizontally. For example, to display two Buttons side by side:\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row():\n btn1 = gr.Button(\"Button 1\")\n btn2 = gr.Button(\"Button 2\")\n```\n\nTo make every element in a Row have the same height, use the `equal_height` argument of the `style` method.\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row().style(equal_height=True):\n textbox = gr.Textbox()\n btn2 = gr.Button(\"Button 2\")\n```\n\nThe widths of elements in a Row can be controlled via a combination of `scale` and `min_width` arguments that are present in every Component.\n\n- `scale` is an integer that defines how an element will take up space in a Row. If scale is set to `0`, and element will not expand to take up space. If scale is set to `1` or greater, the element well expand. Multiple elements in a row will expand proportional to their scale. Below, `btn1` will expand twice as much as `btn2`, while `btn0` will not expand at all:\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row():\n btn0 = gr.Button(\"Button 0\", scale=0)\n btn1 = gr.Button(\"Button 1\", scale=1)\n btn2 = gr.Button(\"Button 2\", scale=2)\n```\n\n- `min_width` will set the minimum width the element will take. The Row will wrap if there isn't sufficient space to satisfy all `min_width` values.\n\nLearn more about Rows in the [docs](https://gradio.app/docs/#row).\n\n## Columns and Nesting\n\nComponents within a Column will be placed vertically atop each other. Since the vertical layout is the default layout for Blocks apps anyway, to be useful, Columns are usually nested within Rows. For example:\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n with gr.Row():\n text1 = gr.Textbox(label=\"t1\")\n slider2 = gr.Textbox(label=\"s2\")\n drop3 = gr.Dropdown([\"a\", \"b\", \"c\"], label=\"d3\")\n with gr.Row():\n with gr.Column(scale=1, min_width=600):\n text1 = gr.Textbox(label=\"prompt 1\")\n text2 = gr.Textbox(label=\"prompt 2\")\n inbtw = gr.Button(\"Between\")\n text4 = gr.Textbox(label=\"prompt 1\")\n text5 = gr.Textbox(label=\"prompt 2\")\n with gr.Column(scale=2, min_width=600):\n img1 = gr.Image(\"images/cheetah.jpg\")\n btn = gr.Button(\"Go\").style(full_width=True)\n\ndemo.launch()\n```\n\n\nSee how the first column has two Textboxes arranged vertically. The second column has an Image and Button arranged vertically. Notice how the relative widths of the two columns is set by the `scale` parameter. The column with twice the `scale` value takes up twice the width.\n\nLearn more about Columns in the [docs](https://gradio.app/docs/#column).\n\n## Tabs and Accordions\n\nYou can also create Tabs using the `with gr.Tab('tab_name'):` clause. Any component created inside of a `with gr.Tab('tab_name'):` context appears in that tab. 
Consecutive Tab clauses are grouped together so that a single tab can be selected at one time, and only the components within that Tab's context are shown.\n\nFor example:\n\n```python\nimport numpy as np\nimport gradio as gr\n\n\ndef flip_text(x):\n return x[::-1]\n\n\ndef flip_image(x):\n return np.fliplr(x)\n\n\nwith gr.Blocks() as demo:\n gr.Markdown(\"Flip text or image files using this demo.\")\n with gr.Tab(\"Flip Text\"):\n text_input = gr.Textbox()\n text_output = gr.Textbox()\n text_button = gr.Button(\"Flip\")\n with gr.Tab(\"Flip Image\"):\n with gr.Row():\n image_input = gr.Image()\n image_output = gr.Image()\n image_button = gr.Button(\"Flip\")\n\n with gr.Accordion(\"Open for More!\"):\n gr.Markdown(\"Look at me...\")\n\n text_button.click(flip_text, inputs=text_input, outputs=text_output)\n image_button.click(flip_image, inputs=image_input, outputs=image_output)\n\ndemo.launch()\n\n```\n\n\nAlso note the `gr.Accordion('label')` in this example. The Accordion is a layout that can be toggled open or closed. Like `Tabs`, it is a layout element that can selectively hide or show content. Any components that are defined inside of a `with gr.Accordion('label'):` will be hidden or shown when the accordion's toggle icon is clicked.\n\nLearn more about [Tabs](https://gradio.app/docs/#tab) and [Accordions](https://gradio.app/docs/#accordion) in the docs.\n\n## Visibility\n\nBoth Components and Layout elements have a `visible` argument that can set initially and also updated using `gr.update()`. Setting `gr.update(visible=...)` on a Column can be used to show or hide a set of Components.\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n error_box = gr.Textbox(label=\"Error\", visible=False)\n\n name_box = gr.Textbox(label=\"Name\")\n age_box = gr.Number(label=\"Age\", minimum=0, maximum=100)\n symptoms_box = gr.CheckboxGroup([\"Cough\", \"Fever\", \"Runny Nose\"])\n submit_btn = gr.Button(\"Submit\")\n\n with gr.Column(visible=False) as output_col:\n diagnosis_box = gr.Textbox(label=\"Diagnosis\")\n patient_summary_box = gr.Textbox(label=\"Patient Summary\")\n\n def submit(name, age, symptoms):\n if len(name) == 0:\n return {error_box: gr.update(value=\"Enter name\", visible=True)}\n return {\n output_col: gr.update(visible=True),\n diagnosis_box: \"covid\" if \"Cough\" in symptoms else \"flu\",\n patient_summary_box: f\"{name}, {age} y/o\",\n }\n\n submit_btn.click(\n submit,\n [name_box, age_box, symptoms_box],\n [error_box, diagnosis_box, patient_summary_box, output_col],\n )\n\ndemo.launch()\n\n```\n\n\n## Variable Number of Outputs\n\nBy adjusting the visibility of components in a dynamic way, it is possible to create\ndemos with Gradio that support a *variable numbers of outputs*. Here's a very simple example\nwhere the number of output textboxes is controlled by an input slider:\n\n```python\nimport gradio as gr\n\nmax_textboxes = 10\n\ndef variable_outputs(k):\n k = int(k)\n return [gr.Textbox.update(visible=True)]*k + [gr.Textbox.update(visible=False)]*(max_textboxes-k)\n\nwith gr.Blocks() as demo:\n s = gr.Slider(1, max_textboxes, value=max_textboxes, step=1, label=\"How many textboxes to show:\")\n textboxes = []\n for i in range(max_textboxes):\n t = gr.Textbox(f\"Textbox {i}\")\n textboxes.append(t)\n\n s.change(variable_outputs, s, textboxes)\n\nif __name__ == \"__main__\":\n demo.launch()\n\n```\n\n\n## Defining and Rendering Components Separately\n\nIn some cases, you might want to define components before you actually render them in your UI. 
For instance, you might want to show an examples section using `gr.Examples` above the corresponding `gr.Textbox` input. Since `gr.Examples` requires as a parameter the input component object, you will need to first define the input component, but then render it later, after you have defined the `gr.Examples` object.\n\nThe solution to this is to define the `gr.Textbox` outside of the `gr.Blocks()` scope and use the component's `.render()` method wherever you'd like it placed in the UI.\n\nHere's a full code example:\n\n```python\ninput_textbox = gr.Textbox()\n\nwith gr.Blocks() as demo:\n gr.Examples([\"hello\", \"bonjour\", \"merhaba\"], input_textbox)\n input_textbox.render()\n```\n", - "html": "

Controlling Layout

\n\n

By default, Components in Blocks are arranged vertically. Let's take a look at how we can rearrange Components. Under the hood, this layout structure uses the flexbox model of web development.

\n\n

Rows

\n\n

Elements within a with gr.Row clause will all be displayed horizontally. For example, to display two Buttons side by side:

\n\n
with gr.Blocks() as demo:\n    with gr.Row():\n        btn1 = gr.Button(\"Button 1\")\n        btn2 = gr.Button(\"Button 2\")\n
\n\n

To make every element in a Row have the same height, use the equal_height argument of the style method.

\n\n
with gr.Blocks() as demo:\n    with gr.Row().style(equal_height=True):\n        textbox = gr.Textbox()\n        btn2 = gr.Button(\"Button 2\")\n
\n\n

The widths of elements in a Row can be controlled via a combination of scale and min_width arguments that are present in every Component.

\n\n
    \n
  • scale is an integer that defines how an element will take up space in a Row. If scale is set to 0, the element will not expand to take up space. If scale is set to 1 or greater, the element will expand. Multiple elements in a row will expand proportionally to their scale. Below, btn2 will expand twice as much as btn1, while btn0 will not expand at all:
  • \n
\n\n
with gr.Blocks() as demo:\n    with gr.Row():\n        btn0 = gr.Button(\"Button 0\", scale=0)\n        btn1 = gr.Button(\"Button 1\", scale=1)\n        btn2 = gr.Button(\"Button 2\", scale=2)\n
\n\n
    \n
  • min_width will set the minimum width the element will take. The Row will wrap if there isn't sufficient space to satisfy all min_width values.
  • \n
\n\n

Learn more about Rows in the docs.

\n\n

Columns and Nesting

\n\n

Components within a Column will be placed vertically atop each other. Since the vertical layout is the default layout for Blocks apps anyway, to be useful, Columns are usually nested within Rows. For example:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        text1 = gr.Textbox(label=\"t1\")\n        slider2 = gr.Textbox(label=\"s2\")\n        drop3 = gr.Dropdown([\"a\", \"b\", \"c\"], label=\"d3\")\n    with gr.Row():\n        with gr.Column(scale=1, min_width=600):\n            text1 = gr.Textbox(label=\"prompt 1\")\n            text2 = gr.Textbox(label=\"prompt 2\")\n            inbtw = gr.Button(\"Between\")\n            text4 = gr.Textbox(label=\"prompt 1\")\n            text5 = gr.Textbox(label=\"prompt 2\")\n        with gr.Column(scale=2, min_width=600):\n            img1 = gr.Image(\"images/cheetah.jpg\")\n            btn = gr.Button(\"Go\").style(full_width=True)\n\ndemo.launch()\n
\n\n

\n\n

See how the first column has two Textboxes arranged vertically. The second column has an Image and Button arranged vertically. Notice how the relative widths of the two columns are set by the scale parameter. The column with twice the scale value takes up twice the width.

\n\n

Learn more about Columns in the docs.

\n\n

Tabs and Accordions

\n\n

You can also create Tabs using the with gr.Tab('tab_name'): clause. Any component created inside of a with gr.Tab('tab_name'): context appears in that tab. Consecutive Tab clauses are grouped together so that a single tab can be selected at one time, and only the components within that Tab's context are shown.

\n\n

For example:

\n\n
import numpy as np\nimport gradio as gr\n\n\ndef flip_text(x):\n    return x[::-1]\n\n\ndef flip_image(x):\n    return np.fliplr(x)\n\n\nwith gr.Blocks() as demo:\n    gr.Markdown(\"Flip text or image files using this demo.\")\n    with gr.Tab(\"Flip Text\"):\n        text_input = gr.Textbox()\n        text_output = gr.Textbox()\n        text_button = gr.Button(\"Flip\")\n    with gr.Tab(\"Flip Image\"):\n        with gr.Row():\n            image_input = gr.Image()\n            image_output = gr.Image()\n        image_button = gr.Button(\"Flip\")\n\n    with gr.Accordion(\"Open for More!\"):\n        gr.Markdown(\"Look at me...\")\n\n    text_button.click(flip_text, inputs=text_input, outputs=text_output)\n    image_button.click(flip_image, inputs=image_input, outputs=image_output)\n\ndemo.launch()\n\n
\n\n

\n\n

Also note the gr.Accordion('label') in this example. The Accordion is a layout that can be toggled open or closed. Like Tabs, it is a layout element that can selectively hide or show content. Any components that are defined inside of a with gr.Accordion('label'): will be hidden or shown when the accordion's toggle icon is clicked.

\n\n

Learn more about Tabs and Accordions in the docs.

\n\n

Visibility

\n\n

Both Components and Layout elements have a visible argument that can be set initially and also updated later using gr.update(). Setting gr.update(visible=...) on a Column can be used to show or hide a set of Components.

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    error_box = gr.Textbox(label=\"Error\", visible=False)\n\n    name_box = gr.Textbox(label=\"Name\")\n    age_box = gr.Number(label=\"Age\", minimum=0, maximum=100)\n    symptoms_box = gr.CheckboxGroup([\"Cough\", \"Fever\", \"Runny Nose\"])\n    submit_btn = gr.Button(\"Submit\")\n\n    with gr.Column(visible=False) as output_col:\n        diagnosis_box = gr.Textbox(label=\"Diagnosis\")\n        patient_summary_box = gr.Textbox(label=\"Patient Summary\")\n\n    def submit(name, age, symptoms):\n        if len(name) == 0:\n            return {error_box: gr.update(value=\"Enter name\", visible=True)}\n        return {\n            output_col: gr.update(visible=True),\n            diagnosis_box: \"covid\" if \"Cough\" in symptoms else \"flu\",\n            patient_summary_box: f\"{name}, {age} y/o\",\n        }\n\n    submit_btn.click(\n        submit,\n        [name_box, age_box, symptoms_box],\n        [error_box, diagnosis_box, patient_summary_box, output_col],\n    )\n\ndemo.launch()\n\n
\n\n

\n\n

Variable Number of Outputs

\n\n

By adjusting the visibility of components in a dynamic way, it is possible to create\ndemos with Gradio that support a variable number of outputs. Here's a very simple example\nwhere the number of output textboxes is controlled by an input slider:

\n\n
import gradio as gr\n\nmax_textboxes = 10\n\ndef variable_outputs(k):\n    k = int(k)\n    return [gr.Textbox.update(visible=True)]*k + [gr.Textbox.update(visible=False)]*(max_textboxes-k)\n\nwith gr.Blocks() as demo:\n    s = gr.Slider(1, max_textboxes, value=max_textboxes, step=1, label=\"How many textboxes to show:\")\n    textboxes = []\n    for i in range(max_textboxes):\n        t = gr.Textbox(f\"Textbox {i}\")\n        textboxes.append(t)\n\n    s.change(variable_outputs, s, textboxes)\n\nif __name__ == \"__main__\":\n   demo.launch()\n\n
\n\n

\n\n

Defining and Rendering Components Separately

\n\n

In some cases, you might want to define components before you actually render them in your UI. For instance, you might want to show an examples section using gr.Examples above the corresponding gr.Textbox input. Since gr.Examples requires the input component object as a parameter, you will need to define the input component first, but render it later, after you have defined the gr.Examples object.

\n\n

The solution to this is to define the gr.Textbox outside of the gr.Blocks() scope and use the component's .render() method wherever you'd like it placed in the UI.

\n\n

Here's a full code example:

\n\n
input_textbox = gr.Textbox()\n\nwith gr.Blocks() as demo:\n    gr.Examples([\"hello\", \"bonjour\", \"merhaba\"], input_textbox)\n    input_textbox.render()\n
\n", - "tags": [], - "spaces": [], - "url": "/guides/controlling-layout/", - "contributor": null - }, - { - "name": "state-in-blocks", - "category": "building-with-blocks", - "pretty_category": "Building With Blocks", - "guide_index": 3, - "absolute_index": 10, - "pretty_name": "State In Blocks", - "content": "# State in Blocks\n\nWe covered [State in Interfaces](https://gradio.app/interface-state), this guide takes a look at state in Blocks, which works mostly the same. \n\n## Global State\n\nGlobal state in Blocks works the same as in Interface. Any variable created outside a function call is a reference shared between all users.\n\n## Session State\n\nGradio supports session **state**, where data persists across multiple submits within a page session, in Blocks apps as well. To reiterate, session data is *not* shared between different users of your model. To store data in a session state, you need to do three things:\n\n1. Create a `gr.State()` object. If there is a default value to this stateful object, pass that into the constructor.\n2. In the event listener, put the `State` object as an input and output.\n3. In the event listener function, add the variable to the input parameters and the return value.\n\nLet's take a look at a game of hangman. \n\n```python\nimport gradio as gr\n\nsecret_word = \"gradio\"\n\nwith gr.Blocks() as demo: \n used_letters_var = gr.State([])\n with gr.Row() as row:\n with gr.Column():\n input_letter = gr.Textbox(label=\"Enter letter\")\n btn = gr.Button(\"Guess Letter\")\n with gr.Column():\n hangman = gr.Textbox(\n label=\"Hangman\",\n value=\"_\"*len(secret_word)\n )\n used_letters_box = gr.Textbox(label=\"Used Letters\")\n\n def guess_letter(letter, used_letters):\n used_letters.append(letter)\n answer = \"\".join([\n (letter if letter in used_letters else \"_\")\n for letter in secret_word\n ])\n return {\n used_letters_var: used_letters,\n used_letters_box: \", \".join(used_letters),\n hangman: answer\n }\n btn.click(\n guess_letter, \n [input_letter, used_letters_var],\n [used_letters_var, used_letters_box, hangman]\n )\ndemo.launch()\n```\n\n\nLet's see how we do each of the 3 steps listed above in this game:\n\n1. We store the used letters in `used_letters_var`. In the constructor of `State`, we set the initial value of this to `[]`, an empty list. \n2. In `btn.click()`, we have a reference to `used_letters_var` in both the inputs and outputs.\n3. In `guess_letter`, we pass the value of this `State` to `used_letters`, and then return an updated value of this `State` in the return statement.\n\nWith more complex apps, you will likely have many State variables storing session state in a single Blocks app.\n\nLearn more about `State` in the [docs](https://gradio.app/docs#state).\n\n\n\n", - "html": "

State in Blocks

\n\n

We covered State in Interfaces; this guide takes a look at state in Blocks, which works mostly the same.

\n\n

Global State

\n\n

Global state in Blocks works the same as in Interface. Any variable created outside a function call is a reference shared between all users.

\n\n

Session State

\n\n

Gradio supports session state, where data persists across multiple submits within a page session, in Blocks apps as well. To reiterate, session data is not shared between different users of your model. To store data in a session state, you need to do three things:

\n\n
    \n
  1. Create a gr.State() object. If there is a default value to this stateful object, pass that into the constructor.
  2. \n
  3. In the event listener, put the State object as an input and output.
  4. \n
  5. In the event listener function, add the variable to the input parameters and the return value.
  6. \n
\n\n

Let's take a look at a game of hangman.

\n\n
import gradio as gr\n\nsecret_word = \"gradio\"\n\nwith gr.Blocks() as demo:    \n    used_letters_var = gr.State([])\n    with gr.Row() as row:\n        with gr.Column():\n            input_letter = gr.Textbox(label=\"Enter letter\")\n            btn = gr.Button(\"Guess Letter\")\n        with gr.Column():\n            hangman = gr.Textbox(\n                label=\"Hangman\",\n                value=\"_\"*len(secret_word)\n            )\n            used_letters_box = gr.Textbox(label=\"Used Letters\")\n\n    def guess_letter(letter, used_letters):\n        used_letters.append(letter)\n        answer = \"\".join([\n            (letter if letter in used_letters else \"_\")\n            for letter in secret_word\n        ])\n        return {\n            used_letters_var: used_letters,\n            used_letters_box: \", \".join(used_letters),\n            hangman: answer\n        }\n    btn.click(\n        guess_letter, \n        [input_letter, used_letters_var],\n        [used_letters_var, used_letters_box, hangman]\n        )\ndemo.launch()\n
\n\n

\n\n

Let's see how we do each of the 3 steps listed above in this game:

\n\n
    \n
  1. We store the used letters in used_letters_var. In the constructor of State, we set the initial value of this to [], an empty list.
  2. \n
  3. In btn.click(), we have a reference to used_letters_var in both the inputs and outputs.
  4. \n
  5. In guess_letter, we pass the value of this State to used_letters, and then return an updated value of this State in the return statement.
  6. \n
\n\n

With more complex apps, you will likely have many State variables storing session state in a single Blocks app.

\n\n

Learn more about State in the docs.

\n", - "tags": [], - "spaces": [], - "url": "/guides/state-in-blocks/", - "contributor": null - }, - { - "name": "custom-CSS-and-JS", - "category": "building-with-blocks", - "pretty_category": "Building With Blocks", - "guide_index": 4, - "absolute_index": 11, - "pretty_name": "Custom CSS And JS", - "content": "# Custom JS and CSS\n\nThis guide covers how to style Blocks with more flexibility, as well as adding Javascript code to event listeners. \n\n**Warning**: The use of query selectors in custom JS and CSS is *not* guaranteed to work across Gradio versions as the Gradio HTML DOM may change. We recommend using query selectors sparingly.\n\n## Custom CSS\n\nGradio themes are the easiest way to customize the look and feel of your app. You can choose from a variety of themes, or create your own. To do so, pass the `theme=` kwarg to the `Blocks` constructor. For example:\n\n```python\nwith gr.Blocks(theme=gr.themes.Glass()):\n ...\n```\n\nGradio comes with a set of prebuilt themes which you can load from `gr.themes.*`. You can extend these themes or create your own themes from scratch - see the [Theming guide](/guides/theming-guide) for more details.\n\nFor additional styling ability, you can pass any CSS to your app using the `css=` kwarg.\n\nThe base class for the Gradio app is `gradio-container`, so here's an example that changes the background color of the Gradio app:\n```python\nwith gr.Blocks(css=\".gradio-container {background-color: red}\") as demo:\n ...\n```\n\nIf you'd like to reference external files in your css, preface the file path (which can be a relative or absolute path) with `\"file=\"`, for example:\n\n```python\nwith gr.Blocks(css=\".gradio-container {background: url('file=clouds.jpg')}\") as demo:\n ...\n```\n\nYou can also pass the filepath to a CSS file to the `css` argument. \n\n## The `elem_id` and `elem_classes` Arguments\n\nYou can `elem_id` to add an HTML element `id` to any component, and `elem_classes` to add a class or list of classes. This will allow you to select elements more easily with CSS. This approach is also more likely to be stable across Gradio versions as built-in class names or ids may change (however, as mentioned in the warning above, we cannot guarantee complete compatibility between Gradio versions if you use custom CSS as the DOM elements may themselves change).\n\n```python\ncss = \"\"\"\n#warning {background-color: #FFCCCB} \n.feedback textarea {font-size: 24px !important}\n\"\"\"\n\nwith gr.Blocks(css=css) as demo:\n box1 = gr.Textbox(value=\"Good Job\", elem_classes=\"feedback\")\n box2 = gr.Textbox(value=\"Failure\", elem_id=\"warning\", elem_classes=\"feedback\")\n```\n\nThe CSS `#warning` ruleset will only target the second Textbox, while the `.feedback` ruleset will target both. Note that when targeting classes, you might need to put the `!important` selector to override the default Gradio styles.\n\n## Custom JS\n\nEvent listeners have a `_js` argument that can take a Javascript function as a string and treat it just like a Python event listener function. You can pass both a Javascript function and a Python function (in which case the Javascript function is run first) or only Javascript (and set the Python `fn` to `None`). 
Take a look at the code below:\n\n```python\nimport gradio as gr\n\nblocks = gr.Blocks()\n\nwith blocks as demo:\n subject = gr.Textbox(placeholder=\"subject\")\n verb = gr.Radio([\"ate\", \"loved\", \"hated\"])\n object = gr.Textbox(placeholder=\"object\")\n\n with gr.Row():\n btn = gr.Button(\"Create sentence.\")\n reverse_btn = gr.Button(\"Reverse sentence.\")\n foo_bar_btn = gr.Button(\"Append foo\")\n reverse_then_to_the_server_btn = gr.Button(\n \"Reverse sentence and send to server.\"\n )\n\n def sentence_maker(w1, w2, w3):\n return f\"{w1} {w2} {w3}\"\n\n output1 = gr.Textbox(label=\"output 1\")\n output2 = gr.Textbox(label=\"verb\")\n output3 = gr.Textbox(label=\"verb reversed\")\n output4 = gr.Textbox(label=\"front end process and then send to backend\")\n\n btn.click(sentence_maker, [subject, verb, object], output1)\n reverse_btn.click(\n None, [subject, verb, object], output2, _js=\"(s, v, o) => o + ' ' + v + ' ' + s\"\n )\n verb.change(lambda x: x, verb, output3, _js=\"(x) => [...x].reverse().join('')\")\n foo_bar_btn.click(None, [], subject, _js=\"(x) => x + ' foo'\")\n\n reverse_then_to_the_server_btn.click(\n sentence_maker,\n [subject, verb, object],\n output4,\n _js=\"(s, v, o) => [s, v, o].map(x => [...x].reverse().join(''))\",\n )\n\ndemo.launch()\n\n```\n", - "html": "

Custom JS and CSS

\n\n

This guide covers how to style Blocks with more flexibility, as well as how to add Javascript code to event listeners.

\n\n

Warning: The use of query selectors in custom JS and CSS is not guaranteed to work across Gradio versions as the Gradio HTML DOM may change. We recommend using query selectors sparingly.

\n\n

Custom CSS

\n\n

Gradio themes are the easiest way to customize the look and feel of your app. You can choose from a variety of themes, or create your own. To do so, pass the theme= kwarg to the Blocks constructor. For example:

\n\n
with gr.Blocks(theme=gr.themes.Glass()):\n    ...\n
\n\n

Gradio comes with a set of prebuilt themes which you can load from gr.themes.*. You can extend these themes or create your own themes from scratch - see the Theming guide for more details.

\n\n

For additional styling ability, you can pass any CSS to your app using the css= kwarg.

\n\n

The base class for the Gradio app is gradio-container, so here's an example that changes the background color of the Gradio app:

\n\n
with gr.Blocks(css=\".gradio-container {background-color: red}\") as demo:\n    ...\n
\n\n

If you'd like to reference external files in your css, preface the file path (which can be a relative or absolute path) with \"file=\", for example:

\n\n
with gr.Blocks(css=\".gradio-container {background: url('file=clouds.jpg')}\") as demo:\n    ...\n
\n\n

You can also pass the filepath to a CSS file to the css argument.

\n\n
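
For example, assuming a stylesheet named styles.css sits next to your app file (the filename here is just illustrative):

\n\n
with gr.Blocks(css=\"styles.css\") as demo:\n    ...\n
\n\n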

The elem_id and elem_classes Arguments

\n\n

You can use elem_id to add an HTML element id to any component, and elem_classes to add a class or list of classes. This will allow you to select elements more easily with CSS. This approach is also more likely to be stable across Gradio versions as built-in class names or ids may change (however, as mentioned in the warning above, we cannot guarantee complete compatibility between Gradio versions if you use custom CSS as the DOM elements may themselves change).

\n\n
css = \"\"\"\n#warning {background-color: #FFCCCB} \n.feedback textarea {font-size: 24px !important}\n\"\"\"\n\nwith gr.Blocks(css=css) as demo:\n    box1 = gr.Textbox(value=\"Good Job\", elem_classes=\"feedback\")\n    box2 = gr.Textbox(value=\"Failure\", elem_id=\"warning\", elem_classes=\"feedback\")\n
\n\n

The CSS #warning ruleset will only target the second Textbox, while the .feedback ruleset will target both. Note that when targeting classes, you might need to add the !important flag to override the default Gradio styles.

\n\n

Custom JS

\n\n

Event listeners have a _js argument that can take a Javascript function as a string and treat it just like a Python event listener function. You can pass both a Javascript function and a Python function (in which case the Javascript function is run first) or only Javascript (and set the Python fn to None). Take a look at the code below:

\n\n
import gradio as gr\n\nblocks = gr.Blocks()\n\nwith blocks as demo:\n    subject = gr.Textbox(placeholder=\"subject\")\n    verb = gr.Radio([\"ate\", \"loved\", \"hated\"])\n    object = gr.Textbox(placeholder=\"object\")\n\n    with gr.Row():\n        btn = gr.Button(\"Create sentence.\")\n        reverse_btn = gr.Button(\"Reverse sentence.\")\n        foo_bar_btn = gr.Button(\"Append foo\")\n        reverse_then_to_the_server_btn = gr.Button(\n            \"Reverse sentence and send to server.\"\n        )\n\n    def sentence_maker(w1, w2, w3):\n        return f\"{w1} {w2} {w3}\"\n\n    output1 = gr.Textbox(label=\"output 1\")\n    output2 = gr.Textbox(label=\"verb\")\n    output3 = gr.Textbox(label=\"verb reversed\")\n    output4 = gr.Textbox(label=\"front end process and then send to backend\")\n\n    btn.click(sentence_maker, [subject, verb, object], output1)\n    reverse_btn.click(\n        None, [subject, verb, object], output2, _js=\"(s, v, o) => o + ' ' + v + ' ' + s\"\n    )\n    verb.change(lambda x: x, verb, output3, _js=\"(x) => [...x].reverse().join('')\")\n    foo_bar_btn.click(None, [], subject, _js=\"(x) => x + ' foo'\")\n\n    reverse_then_to_the_server_btn.click(\n        sentence_maker,\n        [subject, verb, object],\n        output4,\n        _js=\"(s, v, o) => [s, v, o].map(x => [...x].reverse().join(''))\",\n    )\n\ndemo.launch()\n\n
\n\n

\n", - "tags": [], - "spaces": [], - "url": "/guides/custom-CSS-and-JS/", - "contributor": null - }, - { - "name": "custom-interpretations-with-blocks", - "category": "other-tutorials", - "pretty_category": "Other Tutorials", - "guide_index": null, - "absolute_index": 36, - "pretty_name": "Custom Interpretations With Blocks", - "content": "# Custom Machine Learning Interpretations with Blocks\n\n\n**Prerequisite**: This Guide requires you to know about Blocks and the interpretation feature of Interfaces.\nMake sure to [read the Guide to Blocks first](https://gradio.app/guides/quickstart/#blocks-more-flexibility-and-control) as well as the\ninterpretation section of the [Advanced Interface Features Guide](/advanced-interface-features#interpreting-your-predictions).\n\n## Introduction\n\nIf you have experience working with the Interface class, then you know that interpreting the prediction of your machine learning model\nis as easy as setting the `interpretation` parameter to either \"default\" or \"shap\".\n\nYou may be wondering if it is possible to add the same interpretation functionality to an app built with the Blocks API.\nNot only is it possible, but the flexibility of Blocks lets you display the interpretation output in ways that are\nimpossible to do with Interfaces!\n\nThis guide will show how to:\n\n1. Recreate the behavior of Interfaces's interpretation feature in a Blocks app.\n2. Customize how interpretations are displayed in a Blocks app.\n\nLet's get started!\n\n## Setting up the Blocks app\n\nLet's build a sentiment classification app with the Blocks API.\nThis app will take text as input and output the probability that this text expresses either negative or positive sentiment.\nWe'll have a single input `Textbox` and a single output `Label` component.\nBelow is the code for the app as well as the app itself.\n\n```python\nimport gradio as gr \nfrom transformers import pipeline\n\nsentiment_classifier = pipeline(\"text-classification\", return_all_scores=True)\n\ndef classifier(text):\n pred = sentiment_classifier(text)\n return {p[\"label\"]: p[\"score\"] for p in pred[0]}\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n input_text = gr.Textbox(label=\"Input Text\")\n with gr.Row():\n classify = gr.Button(\"Classify Sentiment\")\n with gr.Column():\n label = gr.Label(label=\"Predicted Sentiment\")\n\n classify.click(classifier, input_text, label)\ndemo.launch()\n```\n\n \n\n## Adding interpretations to the app\n\nOur goal is to present to our users how the words in the input contribute to the model's prediction.\nThis will help our users understand how the model works and also evaluate its effectiveness.\nFor example, we should expect our model to identify the words \"happy\" and \"love\" with positive sentiment - if not it's a sign we made a mistake in training it!\n\nFor each word in the input, we will compute a score of how much the model's prediction of positive sentiment is changed by that word.\nOnce we have those `(word, score)` pairs we can use gradio to visualize them for the user.\n\nThe [shap](https://shap.readthedocs.io/en/stable/index.html) library will help us compute the `(word, score)` pairs and\ngradio will take care of displaying the output to the user.\n\nThe following code computes the `(word, score)` pairs:\n\n```python\ndef interpretation_function(text):\n explainer = shap.Explainer(sentiment_classifier)\n shap_values = explainer([text])\n \n # Dimensions are (batch size, text size, number of classes)\n # Since we care about 
positive sentiment, use index 1\n scores = list(zip(shap_values.data[0], shap_values.values[0, :, 1]))\n # Scores contains (word, score) pairs\n \n \n # Format expected by gr.components.Interpretation\n return {\"original\": text, \"interpretation\": scores}\n```\n\nNow, all we have to do is add a button that runs this function when clicked.\nTo display the interpretation, we will use `gr.components.Interpretation`.\nThis will color each word in the input either red or blue.\nRed if it contributes to positive sentiment and blue if it contributes to negative sentiment.\nThis is how `Interface` displays the interpretation output for text.\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n input_text = gr.Textbox(label=\"Input Text\")\n with gr.Row():\n classify = gr.Button(\"Classify Sentiment\")\n interpret = gr.Button(\"Interpret\")\n with gr.Column():\n label = gr.Label(label=\"Predicted Sentiment\")\n with gr.Column():\n interpretation = gr.components.Interpretation(input_text)\n classify.click(classifier, input_text, label)\n interpret.click(interpretation_function, input_text, interpretation)\n\ndemo.launch()\n```\n\n \n\n\n## Customizing how the interpretation is displayed\n\nThe `gr.components.Interpretation` component does a good job of showing how individual words contribute to the sentiment prediction,\nbut what if we also wanted to display the score themselves along with the words?\n\nOne way to do this would be to generate a bar plot where the words are on the horizontal axis and the bar height corresponds\nto the shap score.\n\nWe can do this by modifying our `interpretation_function` to additionally return a matplotlib bar plot.\nWe will display it with the `gr.Plot` component in a separate tab.\n\nThis is how the interpretation function will look:\n```python\ndef interpretation_function(text):\n explainer = shap.Explainer(sentiment_classifier)\n shap_values = explainer([text])\n # Dimensions are (batch size, text size, number of classes)\n # Since we care about positive sentiment, use index 1\n scores = list(zip(shap_values.data[0], shap_values.values[0, :, 1]))\n\n scores_desc = sorted(scores, key=lambda t: t[1])[::-1]\n\n # Filter out empty string added by shap\n scores_desc = [t for t in scores_desc if t[0] != \"\"]\n\n fig_m = plt.figure()\n \n # Select top 5 words that contribute to positive sentiment\n plt.bar(x=[s[0] for s in scores_desc[:5]],\n height=[s[1] for s in scores_desc[:5]])\n plt.title(\"Top words contributing to positive sentiment\")\n plt.ylabel(\"Shap Value\")\n plt.xlabel(\"Word\")\n return {\"original\": text, \"interpretation\": scores}, fig_m\n```\n\nAnd this is how the app code will look:\n```python\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n input_text = gr.Textbox(label=\"Input Text\")\n with gr.Row():\n classify = gr.Button(\"Classify Sentiment\")\n interpret = gr.Button(\"Interpret\")\n with gr.Column():\n label = gr.Label(label=\"Predicted Sentiment\")\n with gr.Column():\n with gr.Tabs():\n with gr.TabItem(\"Display interpretation with built-in component\"):\n interpretation = gr.components.Interpretation(input_text)\n with gr.TabItem(\"Display interpretation with plot\"):\n interpretation_plot = gr.Plot()\n\n classify.click(classifier, input_text, label)\n interpret.click(interpretation_function, input_text, [interpretation, interpretation_plot])\n\ndemo.launch()\n```\n\nYou can see the demo below!\n\n \n\n## Beyond Sentiment Classification\nAlthough we have focused on sentiment classification 
so far, you can add interpretations to almost any machine learning model.\nThe output must be an `gr.Image` or `gr.Label` but the input can be almost anything (`gr.Number`, `gr.Slider`, `gr.Radio`, `gr.Image`).\n\nHere is a demo built with blocks of interpretations for an image classification model:\n\n \n\n\n## Closing remarks\n\nWe did a deep dive \ud83e\udd3f into how interpretations work and how you can add them to your Blocks app.\n\nWe also showed how the Blocks API gives you the power to control how the interpretation is visualized in your app.\n\nAdding interpretations is a helpful way to make your users understand and gain trust in your model.\nNow you have all the tools you need to add them to all of your apps!\n", - "html": "

Custom Machine Learning Interpretations with Blocks

\n\n

Prerequisite: This Guide requires you to know about Blocks and the interpretation feature of Interfaces.\nMake sure to read the Guide to Blocks first as well as the\ninterpretation section of the Advanced Interface Features Guide.

\n\n

Introduction

\n\n

If you have experience working with the Interface class, then you know that interpreting the prediction of your machine learning model\nis as easy as setting the interpretation parameter to either \"default\" or \"shap\".

\n\n
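
As a quick reminder, a minimal sketch of that Interface usage might look like the following (with a toy rule-based classifier standing in for a real model):

\n\n
import gradio as gr\n\ndef classifier(text):\n    # toy stand-in for a real sentiment model\n    score = 1.0 if \"happy\" in text.lower() else 0.0\n    return {\"POSITIVE\": score, \"NEGATIVE\": 1 - score}\n\ndemo = gr.Interface(fn=classifier, inputs=\"text\", outputs=\"label\", interpretation=\"default\")\ndemo.launch()\n
\n\n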

You may be wondering if it is possible to add the same interpretation functionality to an app built with the Blocks API.\nNot only is it possible, but the flexibility of Blocks lets you display the interpretation output in ways that are\nimpossible to do with Interfaces!

\n\n

This guide will show how to:

\n\n
    \n
  1. Recreate the behavior of the Interface's interpretation feature in a Blocks app.
  2. \n
  3. Customize how interpretations are displayed in a Blocks app.
  4. \n
\n\n

Let's get started!

\n\n

Setting up the Blocks app

\n\n

Let's build a sentiment classification app with the Blocks API.\nThis app will take text as input and output the probability that this text expresses either negative or positive sentiment.\nWe'll have a single input Textbox and a single output Label component.\nBelow is the code for the app as well as the app itself.

\n\n
import gradio as gr \nfrom transformers import pipeline\n\nsentiment_classifier = pipeline(\"text-classification\", return_all_scores=True)\n\ndef classifier(text):\n    pred = sentiment_classifier(text)\n    return {p[\"label\"]: p[\"score\"] for p in pred[0]}\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        with gr.Column():\n            input_text = gr.Textbox(label=\"Input Text\")\n            with gr.Row():\n                classify = gr.Button(\"Classify Sentiment\")\n        with gr.Column():\n            label = gr.Label(label=\"Predicted Sentiment\")\n\n    classify.click(classifier, input_text, label)\ndemo.launch()\n
\n\n

\n\n

Adding interpretations to the app

\n\n

Our goal is to present to our users how the words in the input contribute to the model's prediction.\nThis will help our users understand how the model works and also evaluate its effectiveness.\nFor example, we should expect our model to identify the words \"happy\" and \"love\" with positive sentiment - if not it's a sign we made a mistake in training it!

\n\n

For each word in the input, we will compute a score of how much the model's prediction of positive sentiment is changed by that word.\nOnce we have those (word, score) pairs we can use gradio to visualize them for the user.

\n\n

The shap library will help us compute the (word, score) pairs and\ngradio will take care of displaying the output to the user.

\n\n

The following code computes the (word, score) pairs:

\n\n
def interpretation_function(text):\n    explainer = shap.Explainer(sentiment_classifier)\n    shap_values = explainer([text])\n\n    # Dimensions are (batch size, text size, number of classes)\n    # Since we care about positive sentiment, use index 1\n    scores = list(zip(shap_values.data[0], shap_values.values[0, :, 1]))\n    # Scores contains (word, score) pairs\n\n\n    # Format expected by gr.components.Interpretation\n    return {\"original\": text, \"interpretation\": scores}\n
\n\n

Now, all we have to do is add a button that runs this function when clicked.\nTo display the interpretation, we will use gr.components.Interpretation.\nThis will color each word in the input either red or blue.\nRed if it contributes to positive sentiment and blue if it contributes to negative sentiment.\nThis is how Interface displays the interpretation output for text.

\n\n
with gr.Blocks() as demo:\n    with gr.Row():\n        with gr.Column():\n            input_text = gr.Textbox(label=\"Input Text\")\n            with gr.Row():\n                classify = gr.Button(\"Classify Sentiment\")\n                interpret = gr.Button(\"Interpret\")\n        with gr.Column():\n            label = gr.Label(label=\"Predicted Sentiment\")\n        with gr.Column():\n            interpretation = gr.components.Interpretation(input_text)\n    classify.click(classifier, input_text, label)\n    interpret.click(interpretation_function, input_text, interpretation)\n\ndemo.launch()\n
\n\n

\n\n

Customizing how the interpretation is displayed

\n\n

The gr.components.Interpretation component does a good job of showing how individual words contribute to the sentiment prediction,\nbut what if we also wanted to display the scores themselves along with the words?

\n\n

One way to do this would be to generate a bar plot where the words are on the horizontal axis and the bar height corresponds\nto the shap score.

\n\n

We can do this by modifying our interpretation_function to additionally return a matplotlib bar plot.\nWe will display it with the gr.Plot component in a separate tab.

\n\n

This is how the interpretation function will look:

\n\n
def interpretation_function(text):\n    explainer = shap.Explainer(sentiment_classifier)\n    shap_values = explainer([text])\n    # Dimensions are (batch size, text size, number of classes)\n    # Since we care about positive sentiment, use index 1\n    scores = list(zip(shap_values.data[0], shap_values.values[0, :, 1]))\n\n    scores_desc = sorted(scores, key=lambda t: t[1])[::-1]\n\n    # Filter out empty string added by shap\n    scores_desc = [t for t in scores_desc if t[0] != \"\"]\n\n    fig_m = plt.figure()\n\n    # Select top 5 words that contribute to positive sentiment\n    plt.bar(x=[s[0] for s in scores_desc[:5]],\n            height=[s[1] for s in scores_desc[:5]])\n    plt.title(\"Top words contributing to positive sentiment\")\n    plt.ylabel(\"Shap Value\")\n    plt.xlabel(\"Word\")\n    return {\"original\": text, \"interpretation\": scores}, fig_m\n
\n\n

And this is how the app code will look:

\n\n
with gr.Blocks() as demo:\n    with gr.Row():\n        with gr.Column():\n            input_text = gr.Textbox(label=\"Input Text\")\n            with gr.Row():\n                classify = gr.Button(\"Classify Sentiment\")\n                interpret = gr.Button(\"Interpret\")\n        with gr.Column():\n            label = gr.Label(label=\"Predicted Sentiment\")\n        with gr.Column():\n            with gr.Tabs():\n                with gr.TabItem(\"Display interpretation with built-in component\"):\n                    interpretation = gr.components.Interpretation(input_text)\n                with gr.TabItem(\"Display interpretation with plot\"):\n                    interpretation_plot = gr.Plot()\n\n    classify.click(classifier, input_text, label)\n    interpret.click(interpretation_function, input_text, [interpretation, interpretation_plot])\n\ndemo.launch()\n
\n\n

You can see the demo below!

\n\n

\n\n

Beyond Sentiment Classification

\n\n

Although we have focused on sentiment classification so far, you can add interpretations to almost any machine learning model.\nThe output must be a gr.Image or gr.Label, but the input can be almost anything (gr.Number, gr.Slider, gr.Radio, gr.Image).

\n\n

Here is a demo, built with Blocks, of interpretations for an image classification model:

\n\n

\n\n

Closing remarks

\n\n

We did a deep dive \ud83e\udd3f into how interpretations work and how you can add them to your Blocks app.

\n\n

We also showed how the Blocks API gives you the power to control how the interpretation is visualized in your app.

\n\n

Adding interpretations is a great way to help your users understand and gain trust in your model.\nNow you have all the tools you need to add them to all of your apps!

\n", - "tags": ["INTERPRETATION", "SENTIMENT ANALYSIS"], - "spaces": [], - "url": "/guides/custom-interpretations-with-blocks/", - "contributor": null - }, - { - "name": "using-blocks-like-functions", - "category": "building-with-blocks", - "pretty_category": "Building With Blocks", - "guide_index": 5, - "absolute_index": 12, - "pretty_name": "Using Blocks Like Functions", - "content": "# Using Gradio Blocks Like Functions\n\n\n\n**Prerequisite**: This Guide builds on the Blocks Introduction. Make sure to [read that guide first](https://gradio.app/guides/quickstart/#blocks-more-flexibility-and-control).\n\n## Introduction\n\nDid you know that apart from being a full-stack machine learning demo, a Gradio Blocks app is also a regular-old python function!?\n\nThis means that if you have a gradio Blocks (or Interface) app called `demo`, you can use `demo` like you would any python function.\n\nSo doing something like `output = demo(\"Hello\", \"friend\")` will run the first event defined in `demo` on the inputs \"Hello\" and \"friend\" and store it\nin the variable `output`.\n\nIf I put you to sleep \ud83e\udd71, please bear with me! By using apps like functions, you can seamlessly compose Gradio apps.\nThe following section will show how.\n\n## Treating Blocks like functions\n\nLet's say we have the following demo that translates english text to german text. \n\n```python\nimport gradio as gr\n\nfrom transformers import pipeline\n\npipe = pipeline(\"translation\", model=\"t5-base\")\n\n\ndef translate(text):\n return pipe(text)[0][\"translation_text\"]\n\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n english = gr.Textbox(label=\"English text\")\n translate_btn = gr.Button(value=\"Translate\")\n with gr.Column():\n german = gr.Textbox(label=\"German Text\")\n\n translate_btn.click(translate, inputs=english, outputs=german, api_name=\"translate-to-german\")\n examples = gr.Examples(examples=[\"I went to the supermarket yesterday.\", \"Helen is a good swimmer.\"],\n inputs=[english])\n\ndemo.launch()\n```\n\nI already went ahead and hosted it in Hugging Face spaces at [gradio/english_translator](https://huggingface.co/spaces/gradio/english_translator).\n\nYou can see the demo below as well:\n\n\n\nNow, let's say you have an app that generates english text, but you wanted to additionally generate german text.\n\nYou could either:\n\n1. Copy the source code of my english-to-german translation and paste it in your app.\n\n2. 
Load my english-to-german translation in your app and treat it like a normal python function.\n\nOption 1 technically always works, but it often introduces unwanted complexity.\n\nOption 2 lets you borrow the functionality you want without tightly coupling our apps.\n\nAll you have to do is call the `Blocks.load` class method in your source file.\nAfter that, you can use my translation app like a regular python function!\n\nThe following code snippet and demo shows how to use `Blocks.load`.\n\nNote that the variable `english_translator` is my english to german app, but its used in `generate_text` like a regular function.\n\n```python\nimport gradio as gr\n\nfrom transformers import pipeline\n\nenglish_translator = gr.Blocks.load(name=\"spaces/gradio/english_translator\")\nenglish_generator = pipeline(\"text-generation\", model=\"distilgpt2\")\n\n\ndef generate_text(text):\n english_text = english_generator(text)[0][\"generated_text\"]\n german_text = english_translator(english_text)\n return english_text, german_text\n\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n seed = gr.Text(label=\"Input Phrase\")\n with gr.Column():\n english = gr.Text(label=\"Generated English Text\")\n german = gr.Text(label=\"Generated German Text\")\n btn = gr.Button(\"Generate\")\n btn.click(generate_text, inputs=[seed], outputs=[english, german])\n gr.Examples([\"My name is Clara and I am\"], inputs=[seed])\n\ndemo.launch()\n```\n\n\n\n## How to control which function in the app to use\n\nIf the app you are loading defines more than one function, you can specify which function to use\nwith the `fn_index` and `api_name` parameters.\n\nIn the code for our english to german demo, you'll see the following line:\n\n```python\ntranslate_btn.click(translate, inputs=english, outputs=german, api_name=\"translate-to-german\")\n```\n\nThe `api_name` gives this function a unique name in our app. You can use this name to tell gradio which\nfunction in the upstream space you want to use:\n\n```python\nenglish_generator(text, api_name=\"translate-to-german\")[0][\"generated_text\"]\n```\n\nYou can also use the `fn_index` parameter.\nImagine my app also defined an english to spanish translation function.\nIn order to use it in our text generation app, we would use the following code:\n\n```python\nenglish_generator(text, fn_index=1)[0][\"generated_text\"]\n```\n\nFunctions in gradio spaces are zero-indexed, so since the spanish translator would be the second function in my space,\nyou would use index 1. \n\n## Parting Remarks\n\nWe showed how treating a Blocks app like a regular python helps you compose functionality across different apps.\nAny Blocks app can be treated like a function, but a powerful pattern is to `load` an app hosted on \n[Hugging Face Spaces](https://huggingface.co/spaces) prior to treating it like a function in your own app.\nYou can also load models hosted on the [Hugging Face Model Hub](https://huggingface.co/models) - see the [Using Hugging Face Integrations](/using_hugging_face_integrations) guide for an example.\n\n### Happy building! \u2692\ufe0f\n", - "html": "

Using Gradio Blocks Like Functions

\n\n

Prerequisite: This Guide builds on the Blocks Introduction. Make sure to read that guide first.

\n\n

Introduction

\n\n

Did you know that apart from being a full-stack machine learning demo, a Gradio Blocks app is also a regular-old python function!?

\n\n

This means that if you have a gradio Blocks (or Interface) app called demo, you can use demo like you would any python function.

\n\n

So doing something like output = demo(\"Hello\", \"friend\") will run the first event defined in demo on the inputs \"Hello\" and \"friend\" and store it\nin the variable output.

\n\n

If I put you to sleep \ud83e\udd71, please bear with me! By using apps like functions, you can seamlessly compose Gradio apps.\nThe following section will show how.

\n\n

Treating Blocks like functions

\n\n

Let's say we have the following demo that translates english text to german text.

\n\n
import gradio as gr\n\nfrom transformers import pipeline\n\npipe = pipeline(\"translation\", model=\"t5-base\")\n\n\ndef translate(text):\n    return pipe(text)[0][\"translation_text\"]\n\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        with gr.Column():\n            english = gr.Textbox(label=\"English text\")\n            translate_btn = gr.Button(value=\"Translate\")\n        with gr.Column():\n            german = gr.Textbox(label=\"German Text\")\n\n    translate_btn.click(translate, inputs=english, outputs=german, api_name=\"translate-to-german\")\n    examples = gr.Examples(examples=[\"I went to the supermarket yesterday.\", \"Helen is a good swimmer.\"],\n                           inputs=[english])\n\ndemo.launch()\n
\n\n

I already went ahead and hosted it in Hugging Face spaces at gradio/english_translator.

\n\n

You can see the demo below as well:

\n\n

\n\n

Now, let's say you have an app that generates english text, but you wanted to additionally generate german text.

\n\n

You could either:

\n\n
    \n
  1. Copy the source code of my english-to-german translation and paste it in your app.

  2. \n
  3. Load my english-to-german translation in your app and treat it like a normal python function.

  4. \n
\n\n

Option 1 technically always works, but it often introduces unwanted complexity.

\n\n

Option 2 lets you borrow the functionality you want without tightly coupling our apps.

\n\n

All you have to do is call the Blocks.load class method in your source file.\nAfter that, you can use my translation app like a regular python function!

\n\n

The following code snippet and demo shows how to use Blocks.load.

\n\n

Note that the variable english_translator is my english to german app, but it's used in generate_text like a regular function.

\n\n
import gradio as gr

from transformers import pipeline

english_translator = gr.Blocks.load(name="spaces/gradio/english_translator")
english_generator = pipeline("text-generation", model="distilgpt2")


def generate_text(text):
    english_text = english_generator(text)[0]["generated_text"]
    german_text = english_translator(english_text)
    return english_text, german_text


with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            seed = gr.Text(label="Input Phrase")
        with gr.Column():
            english = gr.Text(label="Generated English Text")
            german = gr.Text(label="Generated German Text")
    btn = gr.Button("Generate")
    btn.click(generate_text, inputs=[seed], outputs=[english, german])
    gr.Examples(["My name is Clara and I am"], inputs=[seed])

demo.launch()

How to control which function in the app to use

If the app you are loading defines more than one function, you can specify which function to use with the fn_index and api_name parameters.

In the code for our English-to-German demo, you'll see the following line:

translate_btn.click(translate, inputs=english, outputs=german, api_name="translate-to-german")

The api_name gives this function a unique name in our app. You can use this name to tell Gradio which function in the upstream space you want to use:

english_translator(english_text, api_name="translate-to-german")

You can also use the fn_index parameter. Imagine my app also defined an English-to-Spanish translation function. To use it in our text generation app, we would use the following code:

english_translator(english_text, fn_index=1)

Functions in a Gradio Space are zero-indexed, so since the Spanish translator would be the second function in my space, you would use index 1.
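
To make the indexing concrete, here is a hypothetical sketch of how such a two-function space might be wired up; the Spanish translator, its api_name, and the placeholder lambdas are invented for illustration. The order in which the events are defined is what determines their fn_index:

import gradio as gr

with gr.Blocks() as translator_space:
    english = gr.Textbox(label="English text")
    german = gr.Textbox(label="German Text")
    spanish = gr.Textbox(label="Spanish Text")
    to_german_btn = gr.Button("Translate to German")
    to_spanish_btn = gr.Button("Translate to Spanish")

    # The lambda placeholders stand in for the real translation functions.
    # First event defined  -> reachable with fn_index=0 (or api_name="translate-to-german")
    to_german_btn.click(lambda text: text, inputs=english, outputs=german,
                        api_name="translate-to-german")
    # Second event defined -> reachable with fn_index=1 (or api_name="translate-to-spanish")
    to_spanish_btn.click(lambda text: text, inputs=english, outputs=spanish,
                         api_name="translate-to-spanish")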


Parting Remarks

We showed how treating a Blocks app like a regular Python function helps you compose functionality across different apps. Any Blocks app can be treated like a function, but a particularly powerful pattern is to load an app hosted on Hugging Face Spaces and then treat it like a function in your own app. You can also load models hosted on the Hugging Face Model Hub; see the Using Hugging Face Integrations guide for an example.

Happy building! ⚒️

\n", - "tags": ["TRANSLATION", "HUB", "SPACES"], - "spaces": [], - "url": "/guides/using-blocks-like-functions/", - "contributor": null - } - ], - "override_signature": "with gradio.Blocks():", - "parent": "gradio" - }, - "changeable": { - "class": null, - "name": "Changeable", - "description": "", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - } - ], - "returns": { "annotation": null }, - "example": "", - "fns": [ - { - "fn": null, - "name": "change", - "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Changeable" - } - ], - "parent": "gradio" - }, - "inputable": { - "class": null, - "name": "Inputable", - "description": "", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - } - ], - "returns": { "annotation": null }, - "example": "", - "fns": [ - { - "fn": null, - "name": "input", - "description": "This listener is triggered when the user changes the value of the component. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Inputable" - } - ], - "parent": "gradio" - }, - "clickable": { - "class": null, - "name": "Clickable", - "description": "", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - } - ], - "returns": { "annotation": null }, - "example": "", - "fns": [ - { - "fn": null, - "name": "click", - "description": "This listener is triggered when the component (e.g. a button) is clicked. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. 
Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. 
Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Clickable" - } - ], - "parent": "gradio" - }, - "submittable": { - "class": null, - "name": "Submittable", - "description": "", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - } - ], - "returns": { "annotation": null }, - "example": "", - "fns": [ - { - "fn": null, - "name": "submit", - "description": "This listener is triggered when the user presses the Enter key while the component (e.g. a textbox) is focused. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Submittable" - } - ], - "parent": "gradio" - }, - "editable": { - "class": null, - "name": "Editable", - "description": "", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - } - ], - "returns": { "annotation": null }, - "example": "", - "fns": [ - { - "fn": null, - "name": "edit", - "description": "This listener is triggered when the user edits the component (e.g. image) using the built-in editor. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Editable" - } - ], - "parent": "gradio" - }, - "clearable": { - "class": null, - "name": "Clearable", - "description": "", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - } - ], - "returns": { "annotation": null }, - "example": "", - "fns": [ - { - "fn": null, - "name": "clear", - "description": "This listener is triggered when the user clears the component (e.g. image or audio) using the X button for the component. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. 
Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. 
Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Clearable" - } - ], - "parent": "gradio" - }, - "playable": { - "class": null, - "name": "Playable", - "description": "", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - } - ], - "returns": { "annotation": null }, - "example": "", - "fns": [ - { - "fn": null, - "name": "play", - "description": "This listener is triggered when the user plays the component (e.g. audio or video). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Playable" - }, - { - "fn": null, - "name": "pause", - "description": "This listener is triggered when the media stops playing for any reason (e.g. audio or video). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Playable" - }, - { - "fn": null, - "name": "stop", - "description": "This listener is triggered when the user reaches the end of the media track (e.g. audio or video). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. 
If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Playable" - }, - { - "fn": null, - "name": "end", - "description": "This listener is triggered when the user reaches the end of the media track (e.g. audio or video). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Playable" - } - ], - "parent": "gradio" - }, - "streamable": { - "class": null, - "name": "Streamable", - "description": "", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - } - ], - "returns": { "annotation": null }, - "example": "", - "fns": [ - { - "fn": null, - "name": "stream", - "description": "This listener is triggered when the user streams the component (e.g. a live webcam component). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. 
If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Streamable" - } - ], - "parent": "gradio" - }, - "recordable": { - "class": null, - "name": "Recordable", - "description": "", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - } - ], - "returns": { "annotation": null }, - "example": "", - "fns": [ - { - "fn": null, - "name": "start_recording", - "description": "This listener is triggered when the user starts recording with the component (e.g. audio or video). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. 
If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Recordable" - }, - { - "fn": null, - "name": "stop_recording", - "description": "This listener is triggered when the user stops recording with the component (e.g. audio or video). 
This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. 
For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Recordable" - } - ], - "parent": "gradio" - }, - "focusable": { - "class": null, - "name": "Focusable", - "description": "", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - } - ], - "returns": { "annotation": null }, - "example": "", - "fns": [ - { - "fn": null, - "name": "focus", - "description": "This listener is triggered when the component is focused (e.g. when the user clicks inside a textbox). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Focusable" - }, - { - "fn": null, - "name": "blur", - "description": "This listener is triggered when the component's is unfocused/blurred (e.g. when the user clicks outside of a textbox). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Focusable" - } - ], - "parent": "gradio" - }, - "uploadable": { - "class": null, - "name": "Uploadable", - "description": "", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - } - ], - "returns": { "annotation": null }, - "example": "", - "fns": [ - { - "fn": null, - "name": "upload", - "description": "This listener is triggered when the user uploads a file into the component (e.g. when the user uploads a video into a video component). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. 
Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. 
Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Uploadable" - } - ], - "parent": "gradio" - }, - "releaseable": { - "class": null, - "name": "Releaseable", - "description": "", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - } - ], - "returns": { "annotation": null }, - "example": "", - "fns": [ - { - "fn": null, - "name": "release", - "description": "This listener is triggered when the user releases the mouse on this component (e.g. when the user releases the slider). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Releaseable" - } - ], - "parent": "gradio" - }, - "selectable": { - "class": null, - "name": "Selectable", - "description": "", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - } - ], - "returns": { "annotation": null }, - "example": "", - "fns": [ - { - "fn": null, - "name": "select", - "description": "This listener is triggered when the user selects from within the Component. This event has EventData of type gradio.SelectData that carries information, accessible through SelectData.index and SelectData.value. See EventData documentation on how to use this event data.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Selectable" - } - ], - "parent": "gradio" - }, - "row": { - "class": null, - "name": "Row", - "description": "Row is a layout element within Blocks that renders all children horizontally.", - "tags": { "guides": "controlling-layout" }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "variant", - "annotation": "Literal['default', 'panel', 'compact']", - "doc": "row type, 'default' (no background), 'panel' (gray background color and rounded corners), or 'compact' (rounded corners and no internal gap).", - "default": "\"default\"" - }, - { - "name": "visible", - "annotation": "bool", - "doc": "If False, row will be hidden.", - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "equal_height", - "annotation": "bool", - "doc": "If True, makes every child element have equal height", - "default": "True" - } - ], - "returns": { "annotation": null }, - "example": "with gr.Blocks() as demo:\n with gr.Row():\n gr.Image(\"lion.jpg\", scale=2)\n gr.Image(\"tiger.jpg\", scale=1)\ndemo.launch()", - "fns": [], - "guides": [ - { - "name": "controlling-layout", - "category": "building-with-blocks", - "pretty_category": "Building With Blocks", - "guide_index": 2, - "absolute_index": 9, - "pretty_name": "Controlling Layout", - "content": "# Controlling Layout\n\nBy default, Components in Blocks are arranged vertically. Let's take a look at how we can rearrange Components. Under the hood, this layout structure uses the [flexbox model of web development](https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Flexible_Box_Layout/Basic_Concepts_of_Flexbox).\n\n## Rows\n\nElements within a `with gr.Row` clause will all be displayed horizontally. For example, to display two Buttons side by side:\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row():\n btn1 = gr.Button(\"Button 1\")\n btn2 = gr.Button(\"Button 2\")\n```\n\nTo make every element in a Row have the same height, use the `equal_height` argument of the `style` method.\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row().style(equal_height=True):\n textbox = gr.Textbox()\n btn2 = gr.Button(\"Button 2\")\n```\n\nThe widths of elements in a Row can be controlled via a combination of `scale` and `min_width` arguments that are present in every Component.\n\n- `scale` is an integer that defines how an element will take up space in a Row. If scale is set to `0`, and element will not expand to take up space. If scale is set to `1` or greater, the element well expand. Multiple elements in a row will expand proportional to their scale. Below, `btn1` will expand twice as much as `btn2`, while `btn0` will not expand at all:\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row():\n btn0 = gr.Button(\"Button 0\", scale=0)\n btn1 = gr.Button(\"Button 1\", scale=1)\n btn2 = gr.Button(\"Button 2\", scale=2)\n```\n\n- `min_width` will set the minimum width the element will take. The Row will wrap if there isn't sufficient space to satisfy all `min_width` values.\n\nLearn more about Rows in the [docs](https://gradio.app/docs/#row).\n\n## Columns and Nesting\n\nComponents within a Column will be placed vertically atop each other. 
Since the vertical layout is the default layout for Blocks apps anyway, to be useful, Columns are usually nested within Rows. For example:\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n with gr.Row():\n text1 = gr.Textbox(label=\"t1\")\n slider2 = gr.Textbox(label=\"s2\")\n drop3 = gr.Dropdown([\"a\", \"b\", \"c\"], label=\"d3\")\n with gr.Row():\n with gr.Column(scale=1, min_width=600):\n text1 = gr.Textbox(label=\"prompt 1\")\n text2 = gr.Textbox(label=\"prompt 2\")\n inbtw = gr.Button(\"Between\")\n text4 = gr.Textbox(label=\"prompt 1\")\n text5 = gr.Textbox(label=\"prompt 2\")\n with gr.Column(scale=2, min_width=600):\n img1 = gr.Image(\"images/cheetah.jpg\")\n btn = gr.Button(\"Go\").style(full_width=True)\n\ndemo.launch()\n```\n\n\nSee how the first column has two Textboxes arranged vertically. The second column has an Image and Button arranged vertically. Notice how the relative widths of the two columns is set by the `scale` parameter. The column with twice the `scale` value takes up twice the width.\n\nLearn more about Columns in the [docs](https://gradio.app/docs/#column).\n\n## Tabs and Accordions\n\nYou can also create Tabs using the `with gr.Tab('tab_name'):` clause. Any component created inside of a `with gr.Tab('tab_name'):` context appears in that tab. Consecutive Tab clauses are grouped together so that a single tab can be selected at one time, and only the components within that Tab's context are shown.\n\nFor example:\n\n```python\nimport numpy as np\nimport gradio as gr\n\n\ndef flip_text(x):\n return x[::-1]\n\n\ndef flip_image(x):\n return np.fliplr(x)\n\n\nwith gr.Blocks() as demo:\n gr.Markdown(\"Flip text or image files using this demo.\")\n with gr.Tab(\"Flip Text\"):\n text_input = gr.Textbox()\n text_output = gr.Textbox()\n text_button = gr.Button(\"Flip\")\n with gr.Tab(\"Flip Image\"):\n with gr.Row():\n image_input = gr.Image()\n image_output = gr.Image()\n image_button = gr.Button(\"Flip\")\n\n with gr.Accordion(\"Open for More!\"):\n gr.Markdown(\"Look at me...\")\n\n text_button.click(flip_text, inputs=text_input, outputs=text_output)\n image_button.click(flip_image, inputs=image_input, outputs=image_output)\n\ndemo.launch()\n\n```\n\n\nAlso note the `gr.Accordion('label')` in this example. The Accordion is a layout that can be toggled open or closed. Like `Tabs`, it is a layout element that can selectively hide or show content. Any components that are defined inside of a `with gr.Accordion('label'):` will be hidden or shown when the accordion's toggle icon is clicked.\n\nLearn more about [Tabs](https://gradio.app/docs/#tab) and [Accordions](https://gradio.app/docs/#accordion) in the docs.\n\n## Visibility\n\nBoth Components and Layout elements have a `visible` argument that can set initially and also updated using `gr.update()`. 
Setting `gr.update(visible=...)` on a Column can be used to show or hide a set of Components.\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n error_box = gr.Textbox(label=\"Error\", visible=False)\n\n name_box = gr.Textbox(label=\"Name\")\n age_box = gr.Number(label=\"Age\", minimum=0, maximum=100)\n symptoms_box = gr.CheckboxGroup([\"Cough\", \"Fever\", \"Runny Nose\"])\n submit_btn = gr.Button(\"Submit\")\n\n with gr.Column(visible=False) as output_col:\n diagnosis_box = gr.Textbox(label=\"Diagnosis\")\n patient_summary_box = gr.Textbox(label=\"Patient Summary\")\n\n def submit(name, age, symptoms):\n if len(name) == 0:\n return {error_box: gr.update(value=\"Enter name\", visible=True)}\n return {\n output_col: gr.update(visible=True),\n diagnosis_box: \"covid\" if \"Cough\" in symptoms else \"flu\",\n patient_summary_box: f\"{name}, {age} y/o\",\n }\n\n submit_btn.click(\n submit,\n [name_box, age_box, symptoms_box],\n [error_box, diagnosis_box, patient_summary_box, output_col],\n )\n\ndemo.launch()\n\n```\n\n\n## Variable Number of Outputs\n\nBy adjusting the visibility of components in a dynamic way, it is possible to create\ndemos with Gradio that support a *variable numbers of outputs*. Here's a very simple example\nwhere the number of output textboxes is controlled by an input slider:\n\n```python\nimport gradio as gr\n\nmax_textboxes = 10\n\ndef variable_outputs(k):\n k = int(k)\n return [gr.Textbox.update(visible=True)]*k + [gr.Textbox.update(visible=False)]*(max_textboxes-k)\n\nwith gr.Blocks() as demo:\n s = gr.Slider(1, max_textboxes, value=max_textboxes, step=1, label=\"How many textboxes to show:\")\n textboxes = []\n for i in range(max_textboxes):\n t = gr.Textbox(f\"Textbox {i}\")\n textboxes.append(t)\n\n s.change(variable_outputs, s, textboxes)\n\nif __name__ == \"__main__\":\n demo.launch()\n\n```\n\n\n## Defining and Rendering Components Separately\n\nIn some cases, you might want to define components before you actually render them in your UI. For instance, you might want to show an examples section using `gr.Examples` above the corresponding `gr.Textbox` input. Since `gr.Examples` requires as a parameter the input component object, you will need to first define the input component, but then render it later, after you have defined the `gr.Examples` object.\n\nThe solution to this is to define the `gr.Textbox` outside of the `gr.Blocks()` scope and use the component's `.render()` method wherever you'd like it placed in the UI.\n\nHere's a full code example:\n\n```python\ninput_textbox = gr.Textbox()\n\nwith gr.Blocks() as demo:\n gr.Examples([\"hello\", \"bonjour\", \"merhaba\"], input_textbox)\n input_textbox.render()\n```\n", - "html": "

Controlling Layout

\n\n

By default, Components in Blocks are arranged vertically. Let's take a look at how we can rearrange Components. Under the hood, this layout structure uses the flexbox model of web development.

\n\n

Rows

\n\n

Elements within a with gr.Row clause will all be displayed horizontally. For example, to display two Buttons side by side:

\n\n
with gr.Blocks() as demo:\n    with gr.Row():\n        btn1 = gr.Button(\"Button 1\")\n        btn2 = gr.Button(\"Button 2\")\n
\n\n

To make every element in a Row have the same height, use the equal_height argument of the style method.

\n\n
with gr.Blocks() as demo:\n    with gr.Row().style(equal_height=True):\n        textbox = gr.Textbox()\n        btn2 = gr.Button(\"Button 2\")\n
\n\n
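The Row parameters listed earlier in this file also expose equal_height directly as a constructor argument, so the same layout can be written without the .style() call. A minimal sketch under that assumption:

```python
import gradio as gr

with gr.Blocks() as demo:
    # equal_height passed directly to the Row constructor
    # (equivalent to the .style(equal_height=True) form shown above)
    with gr.Row(equal_height=True):
        textbox = gr.Textbox()
        btn2 = gr.Button("Button 2")

demo.launch()
```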

The widths of elements in a Row can be controlled via a combination of scale and min_width arguments that are present in every Component.

\n\n
    \n
  • scale is an integer that defines how an element will take up space in a Row. If scale is set to 0, an element will not expand to take up space. If scale is set to 1 or greater, the element will expand. Multiple elements in a row will expand in proportion to their scale. Below, btn1 will expand twice as much as btn2, while btn0 will not expand at all:
  • \n
\n\n
with gr.Blocks() as demo:\n    with gr.Row():\n        btn0 = gr.Button(\"Button 0\", scale=0)\n        btn1 = gr.Button(\"Button 1\", scale=1)\n        btn2 = gr.Button(\"Button 2\", scale=2)\n
\n\n
    \n
  • min_width will set the minimum width the element will take. The Row will wrap if there isn't sufficient space to satisfy all min_width values (see the sketch just after this list).
  • \n
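To make the wrapping behavior concrete, here is a minimal sketch. The 600-pixel value is arbitrary, chosen only so that two buttons cannot sit side by side in a narrow viewport, and it assumes min_width is accepted by Button, as the guide states it is for every Component:

```python
import gradio as gr

with gr.Blocks() as demo:
    with gr.Row():
        # Each button requests at least 600px of width. If the viewport cannot
        # fit 1200px of content side by side, the Row wraps onto a second line.
        btn_a = gr.Button("Button A", min_width=600)
        btn_b = gr.Button("Button B", min_width=600)

demo.launch()
```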
\n\n

Learn more about Rows in the docs.

\n\n

Columns and Nesting

\n\n

Components within a Column will be placed vertically atop each other. Since the vertical layout is the default layout for Blocks apps anyway, to be useful, Columns are usually nested within Rows. For example:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        text1 = gr.Textbox(label=\"t1\")\n        slider2 = gr.Textbox(label=\"s2\")\n        drop3 = gr.Dropdown([\"a\", \"b\", \"c\"], label=\"d3\")\n    with gr.Row():\n        with gr.Column(scale=1, min_width=600):\n            text1 = gr.Textbox(label=\"prompt 1\")\n            text2 = gr.Textbox(label=\"prompt 2\")\n            inbtw = gr.Button(\"Between\")\n            text4 = gr.Textbox(label=\"prompt 1\")\n            text5 = gr.Textbox(label=\"prompt 2\")\n        with gr.Column(scale=2, min_width=600):\n            img1 = gr.Image(\"images/cheetah.jpg\")\n            btn = gr.Button(\"Go\").style(full_width=True)\n\ndemo.launch()\n
\n\n

\n\n

See how the first column has two Textboxes arranged vertically. The second column has an Image and a Button arranged vertically. Notice how the relative widths of the two columns are set by the scale parameter. The column with twice the scale value takes up twice the width.

\n\n

Learn more about Columns in the docs.

\n\n

Tabs and Accordions

\n\n

You can also create Tabs using the with gr.Tab('tab_name'): clause. Any component created inside of a with gr.Tab('tab_name'): context appears in that tab. Consecutive Tab clauses are grouped together so that a single tab can be selected at one time, and only the components within that Tab's context are shown.

\n\n

For example:

\n\n
import numpy as np\nimport gradio as gr\n\n\ndef flip_text(x):\n    return x[::-1]\n\n\ndef flip_image(x):\n    return np.fliplr(x)\n\n\nwith gr.Blocks() as demo:\n    gr.Markdown(\"Flip text or image files using this demo.\")\n    with gr.Tab(\"Flip Text\"):\n        text_input = gr.Textbox()\n        text_output = gr.Textbox()\n        text_button = gr.Button(\"Flip\")\n    with gr.Tab(\"Flip Image\"):\n        with gr.Row():\n            image_input = gr.Image()\n            image_output = gr.Image()\n        image_button = gr.Button(\"Flip\")\n\n    with gr.Accordion(\"Open for More!\"):\n        gr.Markdown(\"Look at me...\")\n\n    text_button.click(flip_text, inputs=text_input, outputs=text_output)\n    image_button.click(flip_image, inputs=image_input, outputs=image_output)\n\ndemo.launch()\n\n
\n\n

\n\n

Also note the gr.Accordion('label') in this example. The Accordion is a layout that can be toggled open or closed. Like Tabs, it is a layout element that can selectively hide or show content. Any components that are defined inside of a with gr.Accordion('label'): will be hidden or shown when the accordion's toggle icon is clicked.

\n\n

Learn more about Tabs and Accordions in the docs.
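The Tab component documented later in this file also exposes a select listener, whose event data arrives as gradio.SelectData with .index and .value. A minimal sketch of reacting to the user switching tabs; the handler name and layout here are illustrative only:

```python
import gradio as gr

def on_tab_selected(evt: gr.SelectData):
    # evt.index is the position of the selected tab, evt.value its label
    return f"Selected tab #{evt.index}: {evt.value}"

with gr.Blocks() as demo:
    status = gr.Textbox(label="Last selection")
    with gr.Tab("Lion") as lion_tab:
        gr.Markdown("Lion content")
    with gr.Tab("Tiger") as tiger_tab:
        gr.Markdown("Tiger content")

    lion_tab.select(on_tab_selected, None, status)
    tiger_tab.select(on_tab_selected, None, status)

demo.launch()
```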

\n\n

Visibility

\n\n

Both Components and Layout elements have a visible argument that can be set initially and also updated using gr.update(). Setting gr.update(visible=...) on a Column can be used to show or hide a set of Components.

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    error_box = gr.Textbox(label=\"Error\", visible=False)\n\n    name_box = gr.Textbox(label=\"Name\")\n    age_box = gr.Number(label=\"Age\", minimum=0, maximum=100)\n    symptoms_box = gr.CheckboxGroup([\"Cough\", \"Fever\", \"Runny Nose\"])\n    submit_btn = gr.Button(\"Submit\")\n\n    with gr.Column(visible=False) as output_col:\n        diagnosis_box = gr.Textbox(label=\"Diagnosis\")\n        patient_summary_box = gr.Textbox(label=\"Patient Summary\")\n\n    def submit(name, age, symptoms):\n        if len(name) == 0:\n            return {error_box: gr.update(value=\"Enter name\", visible=True)}\n        return {\n            output_col: gr.update(visible=True),\n            diagnosis_box: \"covid\" if \"Cough\" in symptoms else \"flu\",\n            patient_summary_box: f\"{name}, {age} y/o\",\n        }\n\n    submit_btn.click(\n        submit,\n        [name_box, age_box, symptoms_box],\n        [error_box, diagnosis_box, patient_summary_box, output_col],\n    )\n\ndemo.launch()\n\n
\n\n

\n\n

Variable Number of Outputs

\n\n

By adjusting the visibility of components in a dynamic way, it is possible to create\ndemos with Gradio that support a variable number of outputs. Here's a very simple example\nwhere the number of output textboxes is controlled by an input slider:

\n\n
import gradio as gr\n\nmax_textboxes = 10\n\ndef variable_outputs(k):\n    k = int(k)\n    return [gr.Textbox.update(visible=True)]*k + [gr.Textbox.update(visible=False)]*(max_textboxes-k)\n\nwith gr.Blocks() as demo:\n    s = gr.Slider(1, max_textboxes, value=max_textboxes, step=1, label=\"How many textboxes to show:\")\n    textboxes = []\n    for i in range(max_textboxes):\n        t = gr.Textbox(f\"Textbox {i}\")\n        textboxes.append(t)\n\n    s.change(variable_outputs, s, textboxes)\n\nif __name__ == \"__main__\":\n   demo.launch()\n\n
\n\n

\n\n

Defining and Rendering Components Separately

\n\n

In some cases, you might want to define components before you actually render them in your UI. For instance, you might want to show an examples section using gr.Examples above the corresponding gr.Textbox input. Since gr.Examples requires the input component object as a parameter, you will need to define the input component first, but render it later, after you have defined the gr.Examples object.

\n\n

The solution to this is to define the gr.Textbox outside of the gr.Blocks() scope and use the component's .render() method wherever you'd like it placed in the UI.

\n\n

Here's a full code example:

\n\n
input_textbox = gr.Textbox()\n\nwith gr.Blocks() as demo:\n    gr.Examples([\"hello\", \"bonjour\", \"merhaba\"], input_textbox)\n    input_textbox.render()\n
\n", - "tags": [], - "spaces": [], - "url": "/guides/controlling-layout/", - "contributor": null - } - ], - "override_signature": "with gradio.Row():", - "parent": "gradio" - }, - "column": { - "class": null, - "name": "Column", - "description": "Column is a layout element within Blocks that renders all children vertically. The widths of columns can be set through the `scale` and `min_width` parameters. If a certain scale results in a column narrower than min_width, the min_width parameter will win.", - "tags": { "guides": "controlling-layout" }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "scale", - "annotation": "int", - "doc": "relative width compared to adjacent Columns. For example, if Column A has scale=2, and Column B has scale=1, A will be twice as wide as B.", - "default": "1" - }, - { - "name": "min_width", - "annotation": "int", - "doc": "minimum pixel width of Column, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in a column narrower than min_width, the min_width parameter will be respected first.", - "default": "320" - }, - { - "name": "variant", - "annotation": "Literal['default', 'panel', 'compact']", - "doc": "column type, 'default' (no background), 'panel' (gray background color and rounded corners), or 'compact' (rounded corners and no internal gap).", - "default": "\"default\"" - }, - { - "name": "visible", - "annotation": "bool", - "doc": "If False, column will be hidden.", - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": "with gr.Blocks() as demo:\n with gr.Row():\n with gr.Column(scale=1):\n text1 = gr.Textbox()\n text2 = gr.Textbox()\n with gr.Column(scale=4):\n btn1 = gr.Button(\"Button 1\")\n btn2 = gr.Button(\"Button 2\")", - "fns": [], - "guides": [ - { - "name": "controlling-layout", - "category": "building-with-blocks", - "pretty_category": "Building With Blocks", - "guide_index": 2, - "absolute_index": 9, - "pretty_name": "Controlling Layout", - "content": "# Controlling Layout\n\nBy default, Components in Blocks are arranged vertically. Let's take a look at how we can rearrange Components. Under the hood, this layout structure uses the [flexbox model of web development](https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Flexible_Box_Layout/Basic_Concepts_of_Flexbox).\n\n## Rows\n\nElements within a `with gr.Row` clause will all be displayed horizontally. For example, to display two Buttons side by side:\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row():\n btn1 = gr.Button(\"Button 1\")\n btn2 = gr.Button(\"Button 2\")\n```\n\nTo make every element in a Row have the same height, use the `equal_height` argument of the `style` method.\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row().style(equal_height=True):\n textbox = gr.Textbox()\n btn2 = gr.Button(\"Button 2\")\n```\n\nThe widths of elements in a Row can be controlled via a combination of `scale` and `min_width` arguments that are present in every Component.\n\n- `scale` is an integer that defines how an element will take up space in a Row. If scale is set to `0`, and element will not expand to take up space. If scale is set to `1` or greater, the element well expand. Multiple elements in a row will expand proportional to their scale. 
Below, `btn1` will expand twice as much as `btn2`, while `btn0` will not expand at all:\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row():\n btn0 = gr.Button(\"Button 0\", scale=0)\n btn1 = gr.Button(\"Button 1\", scale=1)\n btn2 = gr.Button(\"Button 2\", scale=2)\n```\n\n- `min_width` will set the minimum width the element will take. The Row will wrap if there isn't sufficient space to satisfy all `min_width` values.\n\nLearn more about Rows in the [docs](https://gradio.app/docs/#row).\n\n## Columns and Nesting\n\nComponents within a Column will be placed vertically atop each other. Since the vertical layout is the default layout for Blocks apps anyway, to be useful, Columns are usually nested within Rows. For example:\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n with gr.Row():\n text1 = gr.Textbox(label=\"t1\")\n slider2 = gr.Textbox(label=\"s2\")\n drop3 = gr.Dropdown([\"a\", \"b\", \"c\"], label=\"d3\")\n with gr.Row():\n with gr.Column(scale=1, min_width=600):\n text1 = gr.Textbox(label=\"prompt 1\")\n text2 = gr.Textbox(label=\"prompt 2\")\n inbtw = gr.Button(\"Between\")\n text4 = gr.Textbox(label=\"prompt 1\")\n text5 = gr.Textbox(label=\"prompt 2\")\n with gr.Column(scale=2, min_width=600):\n img1 = gr.Image(\"images/cheetah.jpg\")\n btn = gr.Button(\"Go\").style(full_width=True)\n\ndemo.launch()\n```\n\n\nSee how the first column has two Textboxes arranged vertically. The second column has an Image and Button arranged vertically. Notice how the relative widths of the two columns is set by the `scale` parameter. The column with twice the `scale` value takes up twice the width.\n\nLearn more about Columns in the [docs](https://gradio.app/docs/#column).\n\n## Tabs and Accordions\n\nYou can also create Tabs using the `with gr.Tab('tab_name'):` clause. Any component created inside of a `with gr.Tab('tab_name'):` context appears in that tab. Consecutive Tab clauses are grouped together so that a single tab can be selected at one time, and only the components within that Tab's context are shown.\n\nFor example:\n\n```python\nimport numpy as np\nimport gradio as gr\n\n\ndef flip_text(x):\n return x[::-1]\n\n\ndef flip_image(x):\n return np.fliplr(x)\n\n\nwith gr.Blocks() as demo:\n gr.Markdown(\"Flip text or image files using this demo.\")\n with gr.Tab(\"Flip Text\"):\n text_input = gr.Textbox()\n text_output = gr.Textbox()\n text_button = gr.Button(\"Flip\")\n with gr.Tab(\"Flip Image\"):\n with gr.Row():\n image_input = gr.Image()\n image_output = gr.Image()\n image_button = gr.Button(\"Flip\")\n\n with gr.Accordion(\"Open for More!\"):\n gr.Markdown(\"Look at me...\")\n\n text_button.click(flip_text, inputs=text_input, outputs=text_output)\n image_button.click(flip_image, inputs=image_input, outputs=image_output)\n\ndemo.launch()\n\n```\n\n\nAlso note the `gr.Accordion('label')` in this example. The Accordion is a layout that can be toggled open or closed. Like `Tabs`, it is a layout element that can selectively hide or show content. Any components that are defined inside of a `with gr.Accordion('label'):` will be hidden or shown when the accordion's toggle icon is clicked.\n\nLearn more about [Tabs](https://gradio.app/docs/#tab) and [Accordions](https://gradio.app/docs/#accordion) in the docs.\n\n## Visibility\n\nBoth Components and Layout elements have a `visible` argument that can set initially and also updated using `gr.update()`. 
Setting `gr.update(visible=...)` on a Column can be used to show or hide a set of Components.\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n error_box = gr.Textbox(label=\"Error\", visible=False)\n\n name_box = gr.Textbox(label=\"Name\")\n age_box = gr.Number(label=\"Age\", minimum=0, maximum=100)\n symptoms_box = gr.CheckboxGroup([\"Cough\", \"Fever\", \"Runny Nose\"])\n submit_btn = gr.Button(\"Submit\")\n\n with gr.Column(visible=False) as output_col:\n diagnosis_box = gr.Textbox(label=\"Diagnosis\")\n patient_summary_box = gr.Textbox(label=\"Patient Summary\")\n\n def submit(name, age, symptoms):\n if len(name) == 0:\n return {error_box: gr.update(value=\"Enter name\", visible=True)}\n return {\n output_col: gr.update(visible=True),\n diagnosis_box: \"covid\" if \"Cough\" in symptoms else \"flu\",\n patient_summary_box: f\"{name}, {age} y/o\",\n }\n\n submit_btn.click(\n submit,\n [name_box, age_box, symptoms_box],\n [error_box, diagnosis_box, patient_summary_box, output_col],\n )\n\ndemo.launch()\n\n```\n\n\n## Variable Number of Outputs\n\nBy adjusting the visibility of components in a dynamic way, it is possible to create\ndemos with Gradio that support a *variable numbers of outputs*. Here's a very simple example\nwhere the number of output textboxes is controlled by an input slider:\n\n```python\nimport gradio as gr\n\nmax_textboxes = 10\n\ndef variable_outputs(k):\n k = int(k)\n return [gr.Textbox.update(visible=True)]*k + [gr.Textbox.update(visible=False)]*(max_textboxes-k)\n\nwith gr.Blocks() as demo:\n s = gr.Slider(1, max_textboxes, value=max_textboxes, step=1, label=\"How many textboxes to show:\")\n textboxes = []\n for i in range(max_textboxes):\n t = gr.Textbox(f\"Textbox {i}\")\n textboxes.append(t)\n\n s.change(variable_outputs, s, textboxes)\n\nif __name__ == \"__main__\":\n demo.launch()\n\n```\n\n\n## Defining and Rendering Components Separately\n\nIn some cases, you might want to define components before you actually render them in your UI. For instance, you might want to show an examples section using `gr.Examples` above the corresponding `gr.Textbox` input. Since `gr.Examples` requires as a parameter the input component object, you will need to first define the input component, but then render it later, after you have defined the `gr.Examples` object.\n\nThe solution to this is to define the `gr.Textbox` outside of the `gr.Blocks()` scope and use the component's `.render()` method wherever you'd like it placed in the UI.\n\nHere's a full code example:\n\n```python\ninput_textbox = gr.Textbox()\n\nwith gr.Blocks() as demo:\n gr.Examples([\"hello\", \"bonjour\", \"merhaba\"], input_textbox)\n input_textbox.render()\n```\n", - "html": "

Controlling Layout

\n\n

By default, Components in Blocks are arranged vertically. Let's take a look at how we can rearrange Components. Under the hood, this layout structure uses the flexbox model of web development.

\n\n

Rows

\n\n

Elements within a with gr.Row clause will all be displayed horizontally. For example, to display two Buttons side by side:

\n\n
with gr.Blocks() as demo:\n    with gr.Row():\n        btn1 = gr.Button(\"Button 1\")\n        btn2 = gr.Button(\"Button 2\")\n
\n\n

To make every element in a Row have the same height, use the equal_height argument of the style method.

\n\n
with gr.Blocks() as demo:\n    with gr.Row().style(equal_height=True):\n        textbox = gr.Textbox()\n        btn2 = gr.Button(\"Button 2\")\n
\n\n

The widths of elements in a Row can be controlled via a combination of scale and min_width arguments that are present in every Component.

\n\n
    \n
  • scale is an integer that defines how an element will take up space in a Row. If scale is set to 0, an element will not expand to take up space. If scale is set to 1 or greater, the element will expand. Multiple elements in a row will expand in proportion to their scale. Below, btn1 will expand twice as much as btn2, while btn0 will not expand at all:
  • \n
\n\n
with gr.Blocks() as demo:\n    with gr.Row():\n        btn0 = gr.Button(\"Button 0\", scale=0)\n        btn1 = gr.Button(\"Button 1\", scale=1)\n        btn2 = gr.Button(\"Button 2\", scale=2)\n
\n\n
    \n
  • min_width will set the minimum width the element will take. The Row will wrap if there isn't sufficient space to satisfy all min_width values.
  • \n
\n\n

Learn more about Rows in the docs.

\n\n

Columns and Nesting

\n\n

Components within a Column will be placed vertically atop each other. Since the vertical layout is the default layout for Blocks apps anyway, to be useful, Columns are usually nested within Rows. For example:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        text1 = gr.Textbox(label=\"t1\")\n        slider2 = gr.Textbox(label=\"s2\")\n        drop3 = gr.Dropdown([\"a\", \"b\", \"c\"], label=\"d3\")\n    with gr.Row():\n        with gr.Column(scale=1, min_width=600):\n            text1 = gr.Textbox(label=\"prompt 1\")\n            text2 = gr.Textbox(label=\"prompt 2\")\n            inbtw = gr.Button(\"Between\")\n            text4 = gr.Textbox(label=\"prompt 1\")\n            text5 = gr.Textbox(label=\"prompt 2\")\n        with gr.Column(scale=2, min_width=600):\n            img1 = gr.Image(\"images/cheetah.jpg\")\n            btn = gr.Button(\"Go\").style(full_width=True)\n\ndemo.launch()\n
\n\n

\n\n

See how the first column has two Textboxes arranged vertically. The second column has an Image and a Button arranged vertically. Notice how the relative widths of the two columns are set by the scale parameter. The column with twice the scale value takes up twice the width.

\n\n

Learn more about Columns in the docs.

\n\n

Tabs and Accordions

\n\n

You can also create Tabs using the with gr.Tab('tab_name'): clause. Any component created inside of a with gr.Tab('tab_name'): context appears in that tab. Consecutive Tab clauses are grouped together so that a single tab can be selected at one time, and only the components within that Tab's context are shown.

\n\n

For example:

\n\n
import numpy as np\nimport gradio as gr\n\n\ndef flip_text(x):\n    return x[::-1]\n\n\ndef flip_image(x):\n    return np.fliplr(x)\n\n\nwith gr.Blocks() as demo:\n    gr.Markdown(\"Flip text or image files using this demo.\")\n    with gr.Tab(\"Flip Text\"):\n        text_input = gr.Textbox()\n        text_output = gr.Textbox()\n        text_button = gr.Button(\"Flip\")\n    with gr.Tab(\"Flip Image\"):\n        with gr.Row():\n            image_input = gr.Image()\n            image_output = gr.Image()\n        image_button = gr.Button(\"Flip\")\n\n    with gr.Accordion(\"Open for More!\"):\n        gr.Markdown(\"Look at me...\")\n\n    text_button.click(flip_text, inputs=text_input, outputs=text_output)\n    image_button.click(flip_image, inputs=image_input, outputs=image_output)\n\ndemo.launch()\n\n
\n\n

\n\n

Also note the gr.Accordion('label') in this example. The Accordion is a layout that can be toggled open or closed. Like Tabs, it is a layout element that can selectively hide or show content. Any components that are defined inside of a with gr.Accordion('label'): will be hidden or shown when the accordion's toggle icon is clicked.

\n\n

Learn more about Tabs and Accordions in the docs.

\n\n

Visibility

\n\n

Both Components and Layout elements have a visible argument that can be set initially and also updated using gr.update(). Setting gr.update(visible=...) on a Column can be used to show or hide a set of Components.

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    error_box = gr.Textbox(label=\"Error\", visible=False)\n\n    name_box = gr.Textbox(label=\"Name\")\n    age_box = gr.Number(label=\"Age\", minimum=0, maximum=100)\n    symptoms_box = gr.CheckboxGroup([\"Cough\", \"Fever\", \"Runny Nose\"])\n    submit_btn = gr.Button(\"Submit\")\n\n    with gr.Column(visible=False) as output_col:\n        diagnosis_box = gr.Textbox(label=\"Diagnosis\")\n        patient_summary_box = gr.Textbox(label=\"Patient Summary\")\n\n    def submit(name, age, symptoms):\n        if len(name) == 0:\n            return {error_box: gr.update(value=\"Enter name\", visible=True)}\n        return {\n            output_col: gr.update(visible=True),\n            diagnosis_box: \"covid\" if \"Cough\" in symptoms else \"flu\",\n            patient_summary_box: f\"{name}, {age} y/o\",\n        }\n\n    submit_btn.click(\n        submit,\n        [name_box, age_box, symptoms_box],\n        [error_box, diagnosis_box, patient_summary_box, output_col],\n    )\n\ndemo.launch()\n\n
\n\n

\n\n

Variable Number of Outputs

\n\n

By adjusting the visibility of components in a dynamic way, it is possible to create\ndemos with Gradio that support a variable number of outputs. Here's a very simple example\nwhere the number of output textboxes is controlled by an input slider:

\n\n
import gradio as gr\n\nmax_textboxes = 10\n\ndef variable_outputs(k):\n    k = int(k)\n    return [gr.Textbox.update(visible=True)]*k + [gr.Textbox.update(visible=False)]*(max_textboxes-k)\n\nwith gr.Blocks() as demo:\n    s = gr.Slider(1, max_textboxes, value=max_textboxes, step=1, label=\"How many textboxes to show:\")\n    textboxes = []\n    for i in range(max_textboxes):\n        t = gr.Textbox(f\"Textbox {i}\")\n        textboxes.append(t)\n\n    s.change(variable_outputs, s, textboxes)\n\nif __name__ == \"__main__\":\n   demo.launch()\n\n
\n\n

\n\n

Defining and Rendering Components Separately

\n\n

In some cases, you might want to define components before you actually render them in your UI. For instance, you might want to show an examples section using gr.Examples above the corresponding gr.Textbox input. Since gr.Examples requires the input component object as a parameter, you will need to define the input component first, but render it later, after you have defined the gr.Examples object.

\n\n

The solution to this is to define the gr.Textbox outside of the gr.Blocks() scope and use the component's .render() method wherever you'd like it placed in the UI.

\n\n

Here's a full code example:

\n\n
input_textbox = gr.Textbox()\n\nwith gr.Blocks() as demo:\n    gr.Examples([\"hello\", \"bonjour\", \"merhaba\"], input_textbox)\n    input_textbox.render()\n
\n", - "tags": [], - "spaces": [], - "url": "/guides/controlling-layout/", - "contributor": null - } - ], - "override_signature": "with gradio.Column():", - "parent": "gradio" - }, - "tab": { - "class": null, - "name": "Tab", - "description": "Tab (or its alias TabItem) is a layout element. Components defined within the Tab will be visible when this tab is selected tab.", - "tags": { "guides": "controlling-layout" }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "label", - "annotation": "str", - "doc": "The visual label for the tab" - }, - { - "name": "id", - "annotation": "int | str | None", - "doc": "An optional identifier for the tab, required if you wish to control the selected tab from a predict function.", - "default": "None" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": "with gr.Blocks() as demo:\n with gr.Tab(\"Lion\"):\n gr.Image(\"lion.jpg\")\n gr.Button(\"New Lion\")\n with gr.Tab(\"Tiger\"):\n gr.Image(\"tiger.jpg\")\n gr.Button(\"New Tiger\")", - "fns": [ - { - "fn": null, - "name": "select", - "description": "This listener is triggered when the user selects from within the Component. This event has EventData of type gradio.SelectData that carries information, accessible through SelectData.index and SelectData.value. See EventData documentation on how to use this event data.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. 
If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Tab" - } - ], - "guides": [ - { - "name": "controlling-layout", - "category": "building-with-blocks", - "pretty_category": "Building With Blocks", - "guide_index": 2, - "absolute_index": 9, - "pretty_name": "Controlling Layout", - "content": "# Controlling Layout\n\nBy default, Components in Blocks are arranged vertically. Let's take a look at how we can rearrange Components. Under the hood, this layout structure uses the [flexbox model of web development](https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Flexible_Box_Layout/Basic_Concepts_of_Flexbox).\n\n## Rows\n\nElements within a `with gr.Row` clause will all be displayed horizontally. For example, to display two Buttons side by side:\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row():\n btn1 = gr.Button(\"Button 1\")\n btn2 = gr.Button(\"Button 2\")\n```\n\nTo make every element in a Row have the same height, use the `equal_height` argument of the `style` method.\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row().style(equal_height=True):\n textbox = gr.Textbox()\n btn2 = gr.Button(\"Button 2\")\n```\n\nThe widths of elements in a Row can be controlled via a combination of `scale` and `min_width` arguments that are present in every Component.\n\n- `scale` is an integer that defines how an element will take up space in a Row. If scale is set to `0`, and element will not expand to take up space. If scale is set to `1` or greater, the element well expand. 
Multiple elements in a row will expand proportional to their scale. Below, `btn1` will expand twice as much as `btn2`, while `btn0` will not expand at all:\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row():\n btn0 = gr.Button(\"Button 0\", scale=0)\n btn1 = gr.Button(\"Button 1\", scale=1)\n btn2 = gr.Button(\"Button 2\", scale=2)\n```\n\n- `min_width` will set the minimum width the element will take. The Row will wrap if there isn't sufficient space to satisfy all `min_width` values.\n\nLearn more about Rows in the [docs](https://gradio.app/docs/#row).\n\n## Columns and Nesting\n\nComponents within a Column will be placed vertically atop each other. Since the vertical layout is the default layout for Blocks apps anyway, to be useful, Columns are usually nested within Rows. For example:\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n with gr.Row():\n text1 = gr.Textbox(label=\"t1\")\n slider2 = gr.Textbox(label=\"s2\")\n drop3 = gr.Dropdown([\"a\", \"b\", \"c\"], label=\"d3\")\n with gr.Row():\n with gr.Column(scale=1, min_width=600):\n text1 = gr.Textbox(label=\"prompt 1\")\n text2 = gr.Textbox(label=\"prompt 2\")\n inbtw = gr.Button(\"Between\")\n text4 = gr.Textbox(label=\"prompt 1\")\n text5 = gr.Textbox(label=\"prompt 2\")\n with gr.Column(scale=2, min_width=600):\n img1 = gr.Image(\"images/cheetah.jpg\")\n btn = gr.Button(\"Go\").style(full_width=True)\n\ndemo.launch()\n```\n\n\nSee how the first column has two Textboxes arranged vertically. The second column has an Image and Button arranged vertically. Notice how the relative widths of the two columns is set by the `scale` parameter. The column with twice the `scale` value takes up twice the width.\n\nLearn more about Columns in the [docs](https://gradio.app/docs/#column).\n\n## Tabs and Accordions\n\nYou can also create Tabs using the `with gr.Tab('tab_name'):` clause. Any component created inside of a `with gr.Tab('tab_name'):` context appears in that tab. Consecutive Tab clauses are grouped together so that a single tab can be selected at one time, and only the components within that Tab's context are shown.\n\nFor example:\n\n```python\nimport numpy as np\nimport gradio as gr\n\n\ndef flip_text(x):\n return x[::-1]\n\n\ndef flip_image(x):\n return np.fliplr(x)\n\n\nwith gr.Blocks() as demo:\n gr.Markdown(\"Flip text or image files using this demo.\")\n with gr.Tab(\"Flip Text\"):\n text_input = gr.Textbox()\n text_output = gr.Textbox()\n text_button = gr.Button(\"Flip\")\n with gr.Tab(\"Flip Image\"):\n with gr.Row():\n image_input = gr.Image()\n image_output = gr.Image()\n image_button = gr.Button(\"Flip\")\n\n with gr.Accordion(\"Open for More!\"):\n gr.Markdown(\"Look at me...\")\n\n text_button.click(flip_text, inputs=text_input, outputs=text_output)\n image_button.click(flip_image, inputs=image_input, outputs=image_output)\n\ndemo.launch()\n\n```\n\n\nAlso note the `gr.Accordion('label')` in this example. The Accordion is a layout that can be toggled open or closed. Like `Tabs`, it is a layout element that can selectively hide or show content. Any components that are defined inside of a `with gr.Accordion('label'):` will be hidden or shown when the accordion's toggle icon is clicked.\n\nLearn more about [Tabs](https://gradio.app/docs/#tab) and [Accordions](https://gradio.app/docs/#accordion) in the docs.\n\n## Visibility\n\nBoth Components and Layout elements have a `visible` argument that can set initially and also updated using `gr.update()`. 
Setting `gr.update(visible=...)` on a Column can be used to show or hide a set of Components.\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n error_box = gr.Textbox(label=\"Error\", visible=False)\n\n name_box = gr.Textbox(label=\"Name\")\n age_box = gr.Number(label=\"Age\", minimum=0, maximum=100)\n symptoms_box = gr.CheckboxGroup([\"Cough\", \"Fever\", \"Runny Nose\"])\n submit_btn = gr.Button(\"Submit\")\n\n with gr.Column(visible=False) as output_col:\n diagnosis_box = gr.Textbox(label=\"Diagnosis\")\n patient_summary_box = gr.Textbox(label=\"Patient Summary\")\n\n def submit(name, age, symptoms):\n if len(name) == 0:\n return {error_box: gr.update(value=\"Enter name\", visible=True)}\n return {\n output_col: gr.update(visible=True),\n diagnosis_box: \"covid\" if \"Cough\" in symptoms else \"flu\",\n patient_summary_box: f\"{name}, {age} y/o\",\n }\n\n submit_btn.click(\n submit,\n [name_box, age_box, symptoms_box],\n [error_box, diagnosis_box, patient_summary_box, output_col],\n )\n\ndemo.launch()\n\n```\n\n\n## Variable Number of Outputs\n\nBy adjusting the visibility of components in a dynamic way, it is possible to create\ndemos with Gradio that support a *variable numbers of outputs*. Here's a very simple example\nwhere the number of output textboxes is controlled by an input slider:\n\n```python\nimport gradio as gr\n\nmax_textboxes = 10\n\ndef variable_outputs(k):\n k = int(k)\n return [gr.Textbox.update(visible=True)]*k + [gr.Textbox.update(visible=False)]*(max_textboxes-k)\n\nwith gr.Blocks() as demo:\n s = gr.Slider(1, max_textboxes, value=max_textboxes, step=1, label=\"How many textboxes to show:\")\n textboxes = []\n for i in range(max_textboxes):\n t = gr.Textbox(f\"Textbox {i}\")\n textboxes.append(t)\n\n s.change(variable_outputs, s, textboxes)\n\nif __name__ == \"__main__\":\n demo.launch()\n\n```\n\n\n## Defining and Rendering Components Separately\n\nIn some cases, you might want to define components before you actually render them in your UI. For instance, you might want to show an examples section using `gr.Examples` above the corresponding `gr.Textbox` input. Since `gr.Examples` requires as a parameter the input component object, you will need to first define the input component, but then render it later, after you have defined the `gr.Examples` object.\n\nThe solution to this is to define the `gr.Textbox` outside of the `gr.Blocks()` scope and use the component's `.render()` method wherever you'd like it placed in the UI.\n\nHere's a full code example:\n\n```python\ninput_textbox = gr.Textbox()\n\nwith gr.Blocks() as demo:\n gr.Examples([\"hello\", \"bonjour\", \"merhaba\"], input_textbox)\n input_textbox.render()\n```\n", - "html": "

Controlling Layout

\n\n

By default, Components in Blocks are arranged vertically. Let's take a look at how we can rearrange Components. Under the hood, this layout structure uses the flexbox model of web development.

\n\n

Rows

\n\n

Elements within a with gr.Row clause will all be displayed horizontally. For example, to display two Buttons side by side:

\n\n
with gr.Blocks() as demo:\n    with gr.Row():\n        btn1 = gr.Button(\"Button 1\")\n        btn2 = gr.Button(\"Button 2\")\n
\n\n

To make every element in a Row have the same height, use the equal_height argument of the style method.

\n\n
with gr.Blocks() as demo:\n    with gr.Row().style(equal_height=True):\n        textbox = gr.Textbox()\n        btn2 = gr.Button(\"Button 2\")\n
\n\n

The widths of elements in a Row can be controlled via a combination of scale and min_width arguments that are present in every Component.

\n\n
    \n
  • scale is an integer that defines how an element will take up space in a Row. If scale is set to 0, the element will not expand to take up space. If scale is set to 1 or greater, the element will expand. Multiple elements in a row will expand in proportion to their scale. Below, btn2 will expand twice as much as btn1, while btn0 will not expand at all:
  • \n
\n\n
with gr.Blocks() as demo:\n    with gr.Row():\n        btn0 = gr.Button(\"Button 0\", scale=0)\n        btn1 = gr.Button(\"Button 1\", scale=1)\n        btn2 = gr.Button(\"Button 2\", scale=2)\n
\n\n
    \n
  • min_width will set the minimum width the element will take. The Row will wrap if there isn't sufficient space to satisfy all min_width values.
  • \n
\n\n
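To see that wrapping behavior in action, here is a minimal sketch (using only the scale and min_width parameters described above); each Button requests at least 300 pixels, so the Row wraps once the viewport is too narrow to fit both side by side:

```python
import gradio as gr

with gr.Blocks() as demo:
    with gr.Row():
        # Each button claims at least 300px of width; if the screen cannot
        # fit both, the Row wraps them onto separate lines instead of
        # shrinking them below their min_width.
        btn_a = gr.Button("Button A", min_width=300)
        btn_b = gr.Button("Button B", min_width=300)

demo.launch()
```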

Learn more about Rows in the docs.

\n\n

Columns and Nesting

\n\n

Components within a Column will be placed vertically atop each other. Since the vertical layout is the default layout for Blocks apps anyway, to be useful, Columns are usually nested within Rows. For example:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        text1 = gr.Textbox(label=\"t1\")\n        slider2 = gr.Textbox(label=\"s2\")\n        drop3 = gr.Dropdown([\"a\", \"b\", \"c\"], label=\"d3\")\n    with gr.Row():\n        with gr.Column(scale=1, min_width=600):\n            text1 = gr.Textbox(label=\"prompt 1\")\n            text2 = gr.Textbox(label=\"prompt 2\")\n            inbtw = gr.Button(\"Between\")\n            text4 = gr.Textbox(label=\"prompt 1\")\n            text5 = gr.Textbox(label=\"prompt 2\")\n        with gr.Column(scale=2, min_width=600):\n            img1 = gr.Image(\"images/cheetah.jpg\")\n            btn = gr.Button(\"Go\").style(full_width=True)\n\ndemo.launch()\n
\n\n

\n\n

See how the first column has several Textboxes and a Button arranged vertically. The second column has an Image and a Button arranged vertically. Notice how the relative widths of the two columns are set by the scale parameter. The column with twice the scale value takes up twice the width.

\n\n

Learn more about Columns in the docs.

\n\n

Tabs and Accordions

\n\n

You can also create Tabs using the with gr.Tab('tab_name'): clause. Any component created inside of a with gr.Tab('tab_name'): context appears in that tab. Consecutive Tab clauses are grouped together so that a single tab can be selected at one time, and only the components within that Tab's context are shown.

\n\n

For example:

\n\n
import numpy as np\nimport gradio as gr\n\n\ndef flip_text(x):\n    return x[::-1]\n\n\ndef flip_image(x):\n    return np.fliplr(x)\n\n\nwith gr.Blocks() as demo:\n    gr.Markdown(\"Flip text or image files using this demo.\")\n    with gr.Tab(\"Flip Text\"):\n        text_input = gr.Textbox()\n        text_output = gr.Textbox()\n        text_button = gr.Button(\"Flip\")\n    with gr.Tab(\"Flip Image\"):\n        with gr.Row():\n            image_input = gr.Image()\n            image_output = gr.Image()\n        image_button = gr.Button(\"Flip\")\n\n    with gr.Accordion(\"Open for More!\"):\n        gr.Markdown(\"Look at me...\")\n\n    text_button.click(flip_text, inputs=text_input, outputs=text_output)\n    image_button.click(flip_image, inputs=image_input, outputs=image_output)\n\ndemo.launch()\n\n
\n\n

\n\n

Also note the gr.Accordion('label') in this example. The Accordion is a layout that can be toggled open or closed. Like Tabs, it is a layout element that can selectively hide or show content. Any components that are defined inside of a with gr.Accordion('label'): will be hidden or shown when the accordion's toggle icon is clicked.
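If you would rather have that section start collapsed, the Accordion also accepts an open argument (True by default); here is a minimal sketch:

```python
import gradio as gr

with gr.Blocks() as demo:
    # open=False starts the accordion collapsed; clicking the toggle reveals it.
    with gr.Accordion("Open for More!", open=False):
        gr.Markdown("Look at me...")

demo.launch()
```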

\n\n

Learn more about Tabs and Accordions in the docs.

\n\n

Visibility

\n\n

Both Components and Layout elements have a visible argument that can be set initially and also updated using gr.update(). Setting gr.update(visible=...) on a Column can be used to show or hide a set of Components.

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    error_box = gr.Textbox(label=\"Error\", visible=False)\n\n    name_box = gr.Textbox(label=\"Name\")\n    age_box = gr.Number(label=\"Age\", minimum=0, maximum=100)\n    symptoms_box = gr.CheckboxGroup([\"Cough\", \"Fever\", \"Runny Nose\"])\n    submit_btn = gr.Button(\"Submit\")\n\n    with gr.Column(visible=False) as output_col:\n        diagnosis_box = gr.Textbox(label=\"Diagnosis\")\n        patient_summary_box = gr.Textbox(label=\"Patient Summary\")\n\n    def submit(name, age, symptoms):\n        if len(name) == 0:\n            return {error_box: gr.update(value=\"Enter name\", visible=True)}\n        return {\n            output_col: gr.update(visible=True),\n            diagnosis_box: \"covid\" if \"Cough\" in symptoms else \"flu\",\n            patient_summary_box: f\"{name}, {age} y/o\",\n        }\n\n    submit_btn.click(\n        submit,\n        [name_box, age_box, symptoms_box],\n        [error_box, diagnosis_box, patient_summary_box, output_col],\n    )\n\ndemo.launch()\n\n
\n\n

\n\n

Variable Number of Outputs

\n\n

By adjusting the visibility of components in a dynamic way, it is possible to create\ndemos with Gradio that support a variable number of outputs. Here's a very simple example\nwhere the number of output textboxes is controlled by an input slider:

\n\n
import gradio as gr\n\nmax_textboxes = 10\n\ndef variable_outputs(k):\n    k = int(k)\n    return [gr.Textbox.update(visible=True)]*k + [gr.Textbox.update(visible=False)]*(max_textboxes-k)\n\nwith gr.Blocks() as demo:\n    s = gr.Slider(1, max_textboxes, value=max_textboxes, step=1, label=\"How many textboxes to show:\")\n    textboxes = []\n    for i in range(max_textboxes):\n        t = gr.Textbox(f\"Textbox {i}\")\n        textboxes.append(t)\n\n    s.change(variable_outputs, s, textboxes)\n\nif __name__ == \"__main__\":\n   demo.launch()\n\n
\n\n

\n\n

Defining and Rendering Components Separately

\n\n

In some cases, you might want to define components before you actually render them in your UI. For instance, you might want to show an examples section using gr.Examples above the corresponding gr.Textbox input. Since gr.Examples requires the input component object as a parameter, you will need to define the input component first and render it later, after you have defined the gr.Examples object.

\n\n

The solution to this is to define the gr.Textbox outside of the gr.Blocks() scope and use the component's .render() method wherever you'd like it placed in the UI.

\n\n

Here's a full code example:

\n\n
input_textbox = gr.Textbox()\n\nwith gr.Blocks() as demo:\n    gr.Examples([\"hello\", \"bonjour\", \"merhaba\"], input_textbox)\n    input_textbox.render()\n
\n", - "tags": [], - "spaces": [], - "url": "/guides/controlling-layout/", - "contributor": null - } - ], - "override_signature": "with gradio.Tab():", - "parent": "gradio" - }, - "group": { - "class": null, - "name": "Group", - "description": "Group is a layout element within Blocks which groups together children so that they do not have any padding or margin between them.", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "visible", - "annotation": "bool", - "doc": "If False, group will be hidden.", - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": "with gr.Group():\n gr.Textbox(label=\"First\")\n gr.Textbox(label=\"Last\")", - "fns": [], - "override_signature": "with gradio.Group():", - "parent": "gradio" - }, - "box": { - "class": null, - "name": "Box", - "description": "Box is a a layout element which places children in a box with rounded corners and some padding around them.", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "visible", - "annotation": "bool", - "doc": "If False, box will be hidden.", - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": "with gr.Box():\n gr.Textbox(label=\"First\")\n gr.Textbox(label=\"Last\")", - "fns": [], - "override_signature": "with gradio.Box():", - "parent": "gradio" - }, - "accordion": { - "class": null, - "name": "Accordion", - "description": "Accordion is a layout element which can be toggled to show/hide the contained content.", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "label", - "annotation": "", - "doc": "name of accordion section." - }, - { - "name": "open", - "annotation": "bool", - "doc": "if True, accordion is open by default.", - "default": "True" - }, - { - "name": "visible", - "annotation": "bool", - "doc": null, - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": "with gr.Accordion(\"See Details\"):\n gr.Markdown(\"lorem ipsum\")", - "fns": [], - "parent": "gradio" - }, - "chatinterface": { - "class": null, - "name": "ChatInterface", - "description": "ChatInterface is Gradio's high-level abstraction for creating chatbot UIs, and allows you to create a web-based demo around a chatbot model in a few lines of code. Only one parameter is required: fn, which takes a function that governs the response of the chatbot based on the user input and chat history. Additional parameters can be used to control the appearance and behavior of the demo.
", - "tags": { - "demos": "chatinterface_random_response, chatinterface_streaming_echo", - "guides": "creating-a-chatbot-fast, sharing-your-app" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "fn", - "annotation": "Callable", - "doc": "the function to wrap the chat interface around. Should accept two parameters: a string input message and list of two-element lists of the form [[user_message, bot_message], ...] representing the chat history, and return a string response. See the Chatbot documentation for more information on the chat history format." - }, - { - "name": "chatbot", - "annotation": "Chatbot | None", - "doc": "an instance of the gr.Chatbot component to use for the chat interface, if you would like to customize the chatbot properties. If not provided, a default gr.Chatbot component will be created.", - "default": "None" - }, - { - "name": "textbox", - "annotation": "Textbox | None", - "doc": "an instance of the gr.Textbox component to use for the chat interface, if you would like to customize the textbox properties. If not provided, a default gr.Textbox component will be created.", - "default": "None" - }, - { - "name": "additional_inputs", - "annotation": "str | IOComponent | list[str | IOComponent] | None", - "doc": "an instance or list of instances of gradio components (or their string shortcuts) to use as additional inputs to the chatbot. If components are not already rendered in a surrounding Blocks, then the components will be displayed under the chatbot, in an accordion.", - "default": "None" - }, - { - "name": "additional_inputs_accordion_name", - "annotation": "str", - "doc": "the label of the accordion to use for additional inputs, only used if additional_inputs is provided.", - "default": "\"Additional Inputs\"" - }, - { - "name": "examples", - "annotation": "list[str] | None", - "doc": "sample inputs for the function; if provided, appear below the chatbot and can be clicked to populate the chatbot input.", - "default": "None" - }, - { - "name": "cache_examples", - "annotation": "bool | None", - "doc": "If True, caches examples in the server for fast runtime in examples. The default option in HuggingFace Spaces is True. The default option elsewhere is False.", - "default": "None" - }, - { - "name": "title", - "annotation": "str | None", - "doc": "a title for the interface; if provided, appears above chatbot in large font. Also used as the tab title when opened in a browser window.", - "default": "None" - }, - { - "name": "description", - "annotation": "str | None", - "doc": "a description for the interface; if provided, appears above the chatbot and beneath the title in regular font. Accepts Markdown and HTML content.", - "default": "None" - }, - { - "name": "theme", - "annotation": "Theme | str | None", - "doc": "Theme to use, loaded from gradio.themes.", - "default": "None" - }, - { - "name": "css", - "annotation": "str | None", - "doc": "custom css or path to custom css file to use with interface.", - "default": "None" - }, - { - "name": "analytics_enabled", - "annotation": "bool | None", - "doc": "Whether to allow basic telemetry. If None, will use GRADIO_ANALYTICS_ENABLED environment variable if defined, or default to True.", - "default": "None" - }, - { - "name": "submit_btn", - "annotation": "str | None | Button", - "doc": "Text to display on the submit button. If None, no button will be displayed. 
If a Button object, that button will be used.", - "default": "\"Submit\"" - }, - { - "name": "stop_btn", - "annotation": "str | None | Button", - "doc": "Text to display on the stop button, which replaces the submit_btn when the submit_btn or retry_btn is clicked and response is streaming. Clicking on the stop_btn will halt the chatbot response. If set to None, stop button functionality does not appear in the chatbot. If a Button object, that button will be used as the stop button.", - "default": "\"Stop\"" - }, - { - "name": "retry_btn", - "annotation": "str | None | Button", - "doc": "Text to display on the retry button. If None, no button will be displayed. If a Button object, that button will be used.", - "default": "\"\ud83d\udd04 Retry\"" - }, - { - "name": "undo_btn", - "annotation": "str | None | Button", - "doc": "Text to display on the delete last button. If None, no button will be displayed. If a Button object, that button will be used.", - "default": "\"\u21a9\ufe0f Undo\"" - }, - { - "name": "clear_btn", - "annotation": "str | None | Button", - "doc": "Text to display on the clear button. If None, no button will be displayed. If a Button object, that button will be used.", - "default": "\"\ud83d\uddd1\ufe0f Clear\"" - }, - { - "name": "autofocus", - "annotation": "bool", - "doc": "If True, autofocuses to the textbox when the page loads.", - "default": "True" - } - ], - "returns": { "annotation": null }, - "example": "import gradio as gr\n\ndef echo(message, history):\n return message\n\ndemo = gr.ChatInterface(fn=echo, examples=[\"hello\", \"hola\", \"merhaba\"], title=\"Echo Bot\")\ndemo.launch()", - "fns": [], - "demos": [ - [ - "chatinterface_random_response", - "import random\nimport gradio as gr\n\ndef random_response(message, history):\n return random.choice([\"Yes\", \"No\"])\n\ndemo = gr.ChatInterface(random_response)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ], - [ - "chatinterface_streaming_echo", - "import time\nimport gradio as gr\n\ndef slow_echo(message, history):\n for i in range(len(message)):\n time.sleep(0.05)\n yield \"You typed: \" + message[: i+1]\n\ndemo = gr.ChatInterface(slow_echo).queue()\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ] - ], - "guides": [ - { - "name": "creating-a-chatbot-fast", - "category": "chatbots", - "pretty_category": "Chatbots", - "guide_index": 1, - "absolute_index": 13, - "pretty_name": "Creating A Chatbot Fast", - "content": "# How to Create a Chatbot with Gradio\n\n\n\n## Introduction\n\nChatbots are a popular application of large language models. Using `gradio`, you can easily build a demo of your chatbot model and share that with your users, or try it yourself using an intuitive chatbot UI.\n\nThis tutorial uses `gr.ChatInterface()`, which is a high-level abstraction that allows you to create your chatbot UI fast, often with a single line of code. The chatbot interface that we create will look something like this:\n\n\n\nWe'll start with a couple of simple examples, and then show how to use `gr.ChatInterface()` with real language models from several popular APIs and libraries, including `langchain`, `openai`, and Hugging Face. \n\n**Prerequisites**: please make sure you are using the **latest version** version of Gradio: \n\n```bash\n$ pip install --upgrade gradio\n```\n\n## Defining a chat function\n\nWhen working with `gr.ChatInterface()`, the first thing you should do is define your chat function. 
Your chat function should take two arguments: `message` and then `history` (the arguments can be named anything, but must be in this order).\n\n* `message`: a `str` representing the user's input.\n* `history`: a `list` of `list` representing the conversations up until that point. Each inner list consists of two `str` representing a pair: `[user input, bot response]`. \n\nYour function should return a single string response, which is the bot's response to the particular user input `message`. Your function can take into account the `history` of messages, as well as the current message.\n\nLet's take a look at a few examples.\n\n## Example: a chatbot that responds yes or no\n\nLet's write a chat function that responds `Yes` or `No` randomly.\n\nHere's our chat function:\n\n```python\nimport random\n\ndef random_response(message, history):\n return random.choice([\"Yes\", \"No\"])\n```\n\nNow, we can plug this into `gr.ChatInterface()` and call the `.launch()` method to create the web interface:\n\n```python\nimport gradio as gr\n\ngr.ChatInterface(random_response).launch()\n```\n\nThat's it! Here's our running demo, try it out:\n\n\n\n## Another example using the user's input and history\n\nOf course, the previous example was very simplistic, it didn't even take user input or the previous history into account! Here's another simple example showing how to incorporate a user's input as well as the history.\n\n```python\nimport random\nimport gradio as gr\n\ndef alternatingly_agree(message, history):\n if len(history) % 2 == 0:\n return f\"Yes, I do think that '{message}'\"\n else:\n return \"I don't think so\"\n\ngr.ChatInterface(alternatingly_agree).launch()\n```\n\n## Streaming chatbots \n\nIf in your chat function, you use `yield` to generate a sequence of responses, you'll end up with a streaming chatbot. It's that simple!\n\n```python\nimport time\nimport gradio as gr\n\ndef slow_echo(message, history):\n for i in range(len(message)):\n time.sleep(0.3)\n yield \"You typed: \" + message[: i+1]\n\ngr.ChatInterface(slow_echo).queue().launch()\n```\n\nNotice that we've [enabled queuing](/guides/key-features#queuing), which is required to use generator functions. While the response is streaming, the \"Submit\" button turns into a \"Stop\" button that can be used to stop the generator function. You can customize the appearance and behavior of the \"Stop\" button using the `stop_btn` parameter.\n\n## Customizing your chatbot\n\nIf you're familiar with Gradio's `Interface` class, the `gr.ChatInterface` includes many of the same arguments that you can use to customize the look and feel of your Chatbot. For example, you can:\n\n* add a title and description above your chatbot using `title` and `description` arguments.\n* add a theme or custom css using `theme` and `css` arguments respectively.\n* add `examples` and even enable `cache_examples`, which make it easier for users to try it out .\n* You can change the text or disable each of the buttons that appear in the chatbot interface: `submit_btn`, `retry_btn`, `undo_btn`, `clear_btn`.\n\nIf you want to customize the `gr.Chatbot` or `gr.Textbox` that compose the `ChatInterface`, then you can pass in your own chatbot or textbox as well. 
Here's an example of how we can use these parameters:\n\n\n```python\nimport gradio as gr\n\ndef yes_man(message, history):\n if message.endswith(\"?\"):\n return \"Yes\"\n else:\n return \"Ask me anything!\"\n\ngr.ChatInterface(\n yes_man,\n chatbot=gr.Chatbot(height=300),\n textbox=gr.Textbox(placeholder=\"Ask me a yes or no question\", container=False, scale=7),\n title=\"Yes Man\",\n description=\"Ask Yes Man any question\",\n theme=\"soft\",\n examples=[\"Hello\", \"Am I cool?\", \"Are tomatoes vegetables?\"],\n cache_examples=True,\n retry_btn=None,\n undo_btn=\"Delete Previous\",\n clear_btn=\"Clear\",\n).launch()\n```\n\n## Additional Inputs\n\nYou may want to add additional parameters to your chatbot and expose them to your users through the Chatbot UI. For example, suppose you want to add a textbox for a system prompt, or a slider that sets the number of tokens in the chatbot's response. The `ChatInterface` class supports an `additional_inputs` parameter which can be used to add additional input components.\n\nThe `additional_inputs` parameters accepts a component or a list of components. You can pass the component instances directly, or use their string shortcuts (e.g. `\"textbox\"` instead of `gr.Textbox()`). If you pass in component instances, and they have *not* already been rendered, then the components will appear underneath the chatbot (and any examples) within a `gr.Accordion()`. You can set the label of this accordion using the `additional_inputs_accordion_name` parameter. \n\nHere's a complete example:\n\n```python\nimport gradio as gr\nimport time\n\ndef echo(message, history, system_prompt, tokens):\n response = f\"System prompt: {system_prompt}\\n Message: {message}.\"\n for i in range(min(len(response), int(tokens))):\n time.sleep(0.05)\n yield response[: i+1]\n\ndemo = gr.ChatInterface(echo, \n additional_inputs=[\n gr.Textbox(\"You are helpful AI.\", label=\"System Prompt\"), \n gr.Slider(10, 100)\n ]\n )\n\nif __name__ == \"__main__\":\n demo.queue().launch()\n```\n\nIf the components you pass into the `additional_inputs` have already been rendered in a parent `gr.Blocks()`, then they will *not* be re-rendered in the accordion. This provides flexibility in deciding where to lay out the input components. In the example below, we position the `gr.Textbox()` on top of the Chatbot UI, while keeping the slider underneath.\n\n```python\nimport gradio as gr\nimport time\n\ndef echo(message, history, system_prompt, tokens):\n response = f\"System prompt: {system_prompt}\\n Message: {message}.\"\n for i in range(min(len(response), int(tokens))):\n time.sleep(0.05)\n yield response[: i+1]\n\nwith gr.Blocks() as demo:\n system_prompt = gr.Textbox(\"You are helpful AI.\", label=\"System Prompt\")\n slider = gr.Slider(10, 100, render=False)\n \n gr.ChatInterface(\n echo, additional_inputs=[system_prompt, slider]\n )\n\ndemo.queue().launch()\n```\n\nIf you need to create something even more custom, then its best to construct the chatbot UI using the low-level `gr.Blocks()` API. We have [a dedicated guide for that here](/guides/creating-a-custom-chatbot-with-blocks).\n\n## Using your chatbot via an API\n\nOnce you've built your Gradio chatbot and are hosting it on [Hugging Face Spaces](https://hf.space) or somewhere else, then you can query it with a simple API at the `/chat` endpoint. 
The endpoint just expects the user's message (and potentially additional inputs if you have set any using the `additional_inputs` parameter), and will return the response, internally keeping track of the messages sent so far.\n\n[](https://github.com/gradio-app/gradio/assets/1778297/7b10d6db-6476-4e2e-bebd-ecda802c3b8f)\n\nTo use the endpoint, you should use either the [Gradio Python Client](/guides/getting-started-with-the-python-client) or the [Gradio JS client](/guides/getting-started-with-the-js-client).\n\n## A `langchain` example\n\nNow, let's actually use the `gr.ChatInterface` with some real large language models. We'll start by using `langchain` on top of `openai` to build a general-purpose streaming chatbot application in 19 lines of code. You'll need to have an OpenAI key for this example (keep reading for the free, open-source equivalent!)\n\n```python\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.schema import AIMessage, HumanMessage\nimport openai\nimport gradio as gr\n\nos.envrion[\"OPENAI_API_KEY\"] = \"sk-...\" # Replace with your key\n\nllm = ChatOpenAI(temperature=1.0, model='gpt-3.5-turbo-0613')\n\ndef predict(message, history):\n history_langchain_format = []\n for human, ai in history:\n history_langchain_format.append(HumanMessage(content=human))\n history_langchain_format.append(AIMessage(content=ai))\n history_langchain_format.append(HumanMessage(content=message))\n gpt_response = llm(history_langchain_format)\n return gpt_response.content\n\ngr.ChatInterface(predict).launch() \n```\n\n## A streaming example using `openai`\n\nOf course, we could also use the `openai` library directy. Here a similar example, but this time with streaming results as well:\n\n\n```python\nimport openai\nimport gradio as gr\n\nopenai.api_key = \"sk-...\" # Replace with your key\n\ndef predict(message, history):\n history_openai_format = []\n for human, assistant in history:\n history_openai_format.append({\"role\": \"user\", \"content\": human })\n history_openai_format.append({\"role\": \"assistant\", \"content\":assistant})\n history_openai_format.append({\"role\": \"user\", \"content\": message})\n\n response = openai.ChatCompletion.create(\n model='gpt-3.5-turbo',\n messages= history_openai_format, \n temperature=1.0,\n stream=True\n )\n \n partial_message = \"\"\n for chunk in response:\n if len(chunk['choices'][0]['delta']) != 0:\n partial_message = partial_message + chunk['choices'][0]['delta']['content']\n yield partial_message \n\ngr.ChatInterface(predict).queue().launch() \n```\n\n## Example using a local, open-source LLM with Hugging Face\n\nOf course, in many cases you want to run a chatbot locally. 
Here's the equivalent example using Together's RedePajama model, from Hugging Face (this requires you to have a GPU with CUDA).\n\n```python\nimport gradio as gr\nimport torch\nfrom transformers import AutoModelForCausalLM, AutoTokenizer, StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer\nfrom threading import Thread\n\ntokenizer = AutoTokenizer.from_pretrained(\"togethercomputer/RedPajama-INCITE-Chat-3B-v1\")\nmodel = AutoModelForCausalLM.from_pretrained(\"togethercomputer/RedPajama-INCITE-Chat-3B-v1\", torch_dtype=torch.float16)\nmodel = model.to('cuda:0')\n\nclass StopOnTokens(StoppingCriteria):\n def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:\n stop_ids = [29, 0]\n for stop_id in stop_ids:\n if input_ids[0][-1] == stop_id:\n return True\n return False\n\ndef predict(message, history): \n\n history_transformer_format = history + [[message, \"\"]]\n stop = StopOnTokens()\n\n messages = \"\".join([\"\".join([\"\\n:\"+item[0], \"\\n:\"+item[1]]) #curr_system_message + \n for item in history_transformer_format])\n \n model_inputs = tokenizer([messages], return_tensors=\"pt\").to(\"cuda\")\n streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)\n generate_kwargs = dict(\n model_inputs,\n streamer=streamer,\n max_new_tokens=1024,\n do_sample=True,\n top_p=0.95,\n top_k=1000,\n temperature=1.0,\n num_beams=1,\n stopping_criteria=StoppingCriteriaList([stop])\n )\n t = Thread(target=model.generate, kwargs=generate_kwargs)\n t.start()\n\n partial_message = \"\"\n for new_token in streamer:\n if new_token != '<':\n partial_message += new_token\n yield partial_message \n \n\ngr.ChatInterface(predict).queue().launch()\n```\n\nWith those examples, you should be all set to create your own Gradio Chatbot demos soon! For building even more custom Chatbot applications, check out [a dedicated guide](/guides/creating-a-custom-chatbot-with-blocks) using the low-level `gr.Blocks()` API.", - "html": "

How to Create a Chatbot with Gradio

\n\n

Introduction

\n\n

Chatbots are a popular application of large language models. Using gradio, you can easily build a demo of your chatbot model and share that with your users, or try it yourself using an intuitive chatbot UI.

\n\n

This tutorial uses gr.ChatInterface(), which is a high-level abstraction that allows you to create your chatbot UI fast, often with a single line of code. The chatbot interface that we create will look something like this:

\n\n

\n\n

We'll start with a couple of simple examples, and then show how to use gr.ChatInterface() with real language models from several popular APIs and libraries, including langchain, openai, and Hugging Face.

\n\n

Prerequisites: please make sure you are using the latest version of Gradio:

\n\n
$ pip install --upgrade gradio\n
\n\n

Defining a chat function

\n\n

When working with gr.ChatInterface(), the first thing you should do is define your chat function. Your chat function should take two arguments: message and then history (the arguments can be named anything, but must be in this order).

\n\n
    \n
  • message: a str representing the user's input.
  • \n
  • history: a list of list representing the conversations up until that point. Each inner list consists of two str representing a pair: [user input, bot response].
  • \n
\n\n

Your function should return a single string response, which is the bot's response to the particular user input message. Your function can take into account the history of messages, as well as the current message.

\n\n

Let's take a look at a few examples.

\n\n

Example: a chatbot that responds yes or no

\n\n

Let's write a chat function that responds Yes or No randomly.

\n\n

Here's our chat function:

\n\n
import random\n\ndef random_response(message, history):\n    return random.choice([\"Yes\", \"No\"])\n
\n\n

Now, we can plug this into gr.ChatInterface() and call the .launch() method to create the web interface:

\n\n
import gradio as gr\n\ngr.ChatInterface(random_response).launch()\n
\n\n

That's it! Here's our running demo, try it out:

\n\n

\n\n

Another example using the user's input and history

\n\n

Of course, the previous example was very simplistic; it didn't even take user input or the previous history into account! Here's another simple example showing how to incorporate a user's input as well as the history.

\n\n
import random\nimport gradio as gr\n\ndef alternatingly_agree(message, history):\n    if len(history) % 2 == 0:\n        return f\"Yes, I do think that '{message}'\"\n    else:\n        return \"I don't think so\"\n\ngr.ChatInterface(alternatingly_agree).launch()\n
\n\n

Streaming chatbots

\n\n

If you use yield in your chat function to generate a sequence of responses, you'll end up with a streaming chatbot. It's that simple!

\n\n
import time\nimport gradio as gr\n\ndef slow_echo(message, history):\n    for i in range(len(message)):\n        time.sleep(0.3)\n        yield \"You typed: \" + message[: i+1]\n\ngr.ChatInterface(slow_echo).queue().launch()\n
\n\n

Notice that we've enabled queuing, which is required to use generator functions. While the response is streaming, the \"Submit\" button turns into a \"Stop\" button that can be used to stop the generator function. You can customize the appearance and behavior of the \"Stop\" button using the stop_btn parameter.
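For instance, here is a minimal sketch (reusing the slow_echo function from the example above) that relabels the stop button; passing stop_btn=None would remove it entirely:

```python
import time
import gradio as gr

def slow_echo(message, history):
    for i in range(len(message)):
        time.sleep(0.3)
        yield "You typed: " + message[: i + 1]

# stop_btn accepts a string label, a gr.Button, or None to hide the button.
gr.ChatInterface(slow_echo, stop_btn="Stop generating").queue().launch()
```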

\n\n

Customizing your chatbot

\n\n

If you're familiar with Gradio's Interface class, the gr.ChatInterface includes many of the same arguments that you can use to customize the look and feel of your Chatbot. For example, you can:

\n\n
    \n
  • add a title and description above your chatbot using title and description arguments.
  • \n
  • add a theme or custom css using theme and css arguments respectively.
  • \n
  • add examples and even enable cache_examples, which make it easier for users to try it out.
  • \n
  • You can change the text or disable each of the buttons that appear in the chatbot interface: submit_btn, retry_btn, undo_btn, clear_btn.
  • \n
\n\n

If you want to customize the gr.Chatbot or gr.Textbox that compose the ChatInterface, then you can pass in your own chatbot or textbox as well. Here's an example of how we can use these parameters:

\n\n
import gradio as gr\n\ndef yes_man(message, history):\n    if message.endswith(\"?\"):\n        return \"Yes\"\n    else:\n        return \"Ask me anything!\"\n\ngr.ChatInterface(\n    yes_man,\n    chatbot=gr.Chatbot(height=300),\n    textbox=gr.Textbox(placeholder=\"Ask me a yes or no question\", container=False, scale=7),\n    title=\"Yes Man\",\n    description=\"Ask Yes Man any question\",\n    theme=\"soft\",\n    examples=[\"Hello\", \"Am I cool?\", \"Are tomatoes vegetables?\"],\n    cache_examples=True,\n    retry_btn=None,\n    undo_btn=\"Delete Previous\",\n    clear_btn=\"Clear\",\n).launch()\n
\n\n

Additional Inputs

\n\n

You may want to add additional parameters to your chatbot and expose them to your users through the Chatbot UI. For example, suppose you want to add a textbox for a system prompt, or a slider that sets the number of tokens in the chatbot's response. The ChatInterface class supports an additional_inputs parameter which can be used to add additional input components.

\n\n

The additional_inputs parameter accepts a component or a list of components. You can pass the component instances directly, or use their string shortcuts (e.g. \"textbox\" instead of gr.Textbox()). If you pass in component instances, and they have not already been rendered, then the components will appear underneath the chatbot (and any examples) within a gr.Accordion(). You can set the label of this accordion using the additional_inputs_accordion_name parameter.

\n\n

Here's a complete example:

\n\n
import gradio as gr\nimport time\n\ndef echo(message, history, system_prompt, tokens):\n    response = f\"System prompt: {system_prompt}\\n Message: {message}.\"\n    for i in range(min(len(response), int(tokens))):\n        time.sleep(0.05)\n        yield response[: i+1]\n\ndemo = gr.ChatInterface(echo, \n                        additional_inputs=[\n                            gr.Textbox(\"You are helpful AI.\", label=\"System Prompt\"), \n                            gr.Slider(10, 100)\n                        ]\n                       )\n\nif __name__ == \"__main__\":\n    demo.queue().launch()\n
\n\n
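The accordion label itself can also be changed through the additional_inputs_accordion_name parameter mentioned earlier; here is a minimal sketch (the Chat Settings label is just an illustrative choice):

```python
import gradio as gr

def echo(message, history, system_prompt):
    return f"System prompt: {system_prompt}\n Message: {message}."

demo = gr.ChatInterface(
    echo,
    additional_inputs=[gr.Textbox("You are helpful AI.", label="System Prompt")],
    # Label of the accordion holding the extra inputs (defaults to "Additional Inputs").
    additional_inputs_accordion_name="Chat Settings",
)

if __name__ == "__main__":
    demo.launch()
```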

If the components you pass into the additional_inputs have already been rendered in a parent gr.Blocks(), then they will not be re-rendered in the accordion. This provides flexibility in deciding where to lay out the input components. In the example below, we position the gr.Textbox() on top of the Chatbot UI, while keeping the slider underneath.

\n\n
import gradio as gr\nimport time\n\ndef echo(message, history, system_prompt, tokens):\n    response = f\"System prompt: {system_prompt}\\n Message: {message}.\"\n    for i in range(min(len(response), int(tokens))):\n        time.sleep(0.05)\n        yield response[: i+1]\n\nwith gr.Blocks() as demo:\n    system_prompt = gr.Textbox(\"You are helpful AI.\", label=\"System Prompt\")\n    slider = gr.Slider(10, 100, render=False)\n\n    gr.ChatInterface(\n        echo, additional_inputs=[system_prompt, slider]\n    )\n\ndemo.queue().launch()\n
\n\n

If you need to create something even more custom, then it's best to construct the chatbot UI using the low-level gr.Blocks() API. We have a dedicated guide for that here.

\n\n

Using your chatbot via an API

\n\n

Once you've built your Gradio chatbot and are hosting it on Hugging Face Spaces or somewhere else, then you can query it with a simple API at the /chat endpoint. The endpoint just expects the user's message (and potentially additional inputs if you have set any using the additional_inputs parameter), and will return the response, internally keeping track of the messages sent so far.

\n\n

\n\n

To use the endpoint, you should use either the Gradio Python Client or the Gradio JS client.
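For instance, with the Python client, a minimal sketch looks like this (the Space id below is a placeholder; substitute your own app's URL or Space id):

```python
from gradio_client import Client

# Connect to your hosted chatbot (a Space id or the full URL of your app).
client = Client("your-username/your-chatbot-space")  # placeholder Space id

# ChatInterface apps expose a /chat endpoint that takes the user's message
# (plus any additional_inputs you configured) and returns the bot's response.
response = client.predict("What is the capital of France?", api_name="/chat")
print(response)
```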

\n\n

A langchain example

\n\n

Now, let's actually use the gr.ChatInterface with some real large language models. We'll start by using langchain on top of openai to build a general-purpose streaming chatbot application in 19 lines of code. You'll need to have an OpenAI key for this example (keep reading for the free, open-source equivalent!)

\n\n
from langchain.chat_models import ChatOpenAI\nfrom langchain.schema import AIMessage, HumanMessage\nimport os\nimport openai\nimport gradio as gr\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-...\"  # Replace with your key\n\nllm = ChatOpenAI(temperature=1.0, model='gpt-3.5-turbo-0613')\n\ndef predict(message, history):\n    history_langchain_format = []\n    for human, ai in history:\n        history_langchain_format.append(HumanMessage(content=human))\n        history_langchain_format.append(AIMessage(content=ai))\n    history_langchain_format.append(HumanMessage(content=message))\n    gpt_response = llm(history_langchain_format)\n    return gpt_response.content\n\ngr.ChatInterface(predict).launch() \n
\n\n

A streaming example using openai

\n\n

Of course, we could also use the openai library directly. Here's a similar example, but this time with streaming results as well:

\n\n
import openai\nimport gradio as gr\n\nopenai.api_key = \"sk-...\"  # Replace with your key\n\ndef predict(message, history):\n    history_openai_format = []\n    for human, assistant in history:\n        history_openai_format.append({\"role\": \"user\", \"content\": human })\n        history_openai_format.append({\"role\": \"assistant\", \"content\":assistant})\n    history_openai_format.append({\"role\": \"user\", \"content\": message})\n\n    response = openai.ChatCompletion.create(\n        model='gpt-3.5-turbo',\n        messages= history_openai_format,         \n        temperature=1.0,\n        stream=True\n    )\n\n    partial_message = \"\"\n    for chunk in response:\n        if len(chunk['choices'][0]['delta']) != 0:\n            partial_message = partial_message + chunk['choices'][0]['delta']['content']\n            yield partial_message \n\ngr.ChatInterface(predict).queue().launch() \n
\n\n

Example using a local, open-source LLM with Hugging Face

\n\n

Of course, in many cases you want to run a chatbot locally. Here's the equivalent example using Together's RedPajama model from Hugging Face (this requires you to have a GPU with CUDA).

\n\n
import gradio as gr\nimport torch\nfrom transformers import AutoModelForCausalLM, AutoTokenizer, StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer\nfrom threading import Thread\n\ntokenizer = AutoTokenizer.from_pretrained(\"togethercomputer/RedPajama-INCITE-Chat-3B-v1\")\nmodel = AutoModelForCausalLM.from_pretrained(\"togethercomputer/RedPajama-INCITE-Chat-3B-v1\", torch_dtype=torch.float16)\nmodel = model.to('cuda:0')\n\nclass StopOnTokens(StoppingCriteria):\n    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:\n        stop_ids = [29, 0]\n        for stop_id in stop_ids:\n            if input_ids[0][-1] == stop_id:\n                return True\n        return False\n\ndef predict(message, history): \n\n    history_transformer_format = history + [[message, \"\"]]\n    stop = StopOnTokens()\n\n    messages = \"\".join([\"\".join([\"\\n:\"+item[0], \"\\n:\"+item[1]])  #curr_system_message + \n                for item in history_transformer_format])\n\n    model_inputs = tokenizer([messages], return_tensors=\"pt\").to(\"cuda\")\n    streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)\n    generate_kwargs = dict(\n        model_inputs,\n        streamer=streamer,\n        max_new_tokens=1024,\n        do_sample=True,\n        top_p=0.95,\n        top_k=1000,\n        temperature=1.0,\n        num_beams=1,\n        stopping_criteria=StoppingCriteriaList([stop])\n        )\n    t = Thread(target=model.generate, kwargs=generate_kwargs)\n    t.start()\n\n    partial_message  = \"\"\n    for new_token in streamer:\n        if new_token != '<':\n            partial_message += new_token\n            yield partial_message \n\n\ngr.ChatInterface(predict).queue().launch()\n
\n\n

With those examples, you should be all set to create your own Gradio Chatbot demos soon! For building even more custom Chatbot applications, check out a dedicated guide using the low-level gr.Blocks() API.

\n", - "tags": ["NLP", "TEXT", "CHAT"], - "spaces": [], - "url": "/guides/creating-a-chatbot-fast/", - "contributor": null - }, - { - "name": "sharing-your-app", - "category": "getting-started", - "pretty_category": "Getting Started", - "guide_index": 3, - "absolute_index": 2, - "pretty_name": "Sharing Your App", - "content": "# Sharing Your App\n\nHow to share your Gradio app: \n\n1. [Sharing demos with the share parameter](#sharing-demos)\n2. [Hosting on HF Spaces](#hosting-on-hf-spaces)\n3. [Embedding hosted spaces](#embedding-hosted-spaces)\n4. [Embedding with web components](#embedding-with-web-components)\n5. [Using the API page](#api-page)\n6. [Adding authentication to the page](#authentication)\n7. [Accessing Network Requests](#accessing-the-network-request-directly)\n8. [Mounting within FastAPI](#mounting-within-another-fast-api-app)\n9. [Security](#security-and-file-access)\n\n## Sharing Demos\n\nGradio demos can be easily shared publicly by setting `share=True` in the `launch()` method. Like this:\n\n```python\ndemo.launch(share=True)\n```\n\nThis generates a public, shareable link that you can send to anybody! When you send this link, the user on the other side can try out the model in their browser. Because the processing happens on your device (as long as your device stays on!), you don't have to worry about any packaging any dependencies. A share link usually looks something like this: **XXXXX.gradio.app**. Although the link is served through a Gradio URL, we are only a proxy for your local server, and do not store any data sent through your app.\n\nKeep in mind, however, that these links are publicly accessible, meaning that anyone can use your model for prediction! Therefore, make sure not to expose any sensitive information through the functions you write, or allow any critical changes to occur on your device. If you set `share=False` (the default, except in colab notebooks), only a local link is created, which can be shared by [port-forwarding](https://www.ssh.com/ssh/tunneling/example) with specific users. \n\n![sharing](https://github.com/gradio-app/gradio/blob/main/guides/assets/sharing.svg?raw=true)\n\nShare links expire after 72 hours.\n\n## Hosting on HF Spaces\n\nIf you'd like to have a permanent link to your Gradio demo on the internet, use Hugging Face Spaces. [Hugging Face Spaces](http://huggingface.co/spaces/) provides the infrastructure to permanently host your machine learning model for free! \n\nAfter you have [created a free Hugging Face account](https://huggingface.co/join), you have three methods to deploy your Gradio app to Hugging Face Spaces:\n\n1. From terminal: run `gradio deploy` in your app directory. The CLI will gather some basic metadata and then launch your app. To update your space, you can re-run this command or enable the Github Actions option to automatically update the Spaces on `git push`.\n\n2. From your browser: Drag and drop a folder containing your Gradio model and all related files [here](https://huggingface.co/new-space).\n\n3. Connect Spaces with your Git repository and Spaces will pull the Gradio app from there. See [this guide how to host on Hugging Face Spaces](https://huggingface.co/blog/gradio-spaces) for more information. \n\n\n\nNote: Some components, like `gr.Image`, will display a \"Share\" button only on Spaces, so that users can share the generated output to the Discussions page of the Space easily. You can disable this with `show_share_button`, such as `gr.Image(show_share_button=False)`. 
\n\n![Image with show_share_button=True](https://github.com/gradio-app/gradio/blob/main/guides/assets/share_icon.png?raw=true)\n\n## Embedding Hosted Spaces\n\nOnce you have hosted your app on Hugging Face Spaces (or on your own server), you may want to embed the demo on a different website, such as your blog or your portfolio. Embedding an interactive demo allows people to try out the machine learning model that you have built, without needing to download or install anything \u2014 right in their browser! The best part is that you can embed interactive demos even in static websites, such as GitHub pages.\n\nThere are two ways to embed your Gradio demos. You can find quick links to both options directly on the Hugging Face Space page, in the \"Embed this Space\" dropdown option:\n\n![Embed this Space dropdown option](https://github.com/gradio-app/gradio/blob/main/guides/assets/embed_this_space.png?raw=true)\n\n### Embedding with Web Components\n\nWeb components typically offer a better experience to users than IFrames. Web components load lazily, meaning that they won't slow down the loading time of your website, and they automatically adjust their height based on the size of the Gradio app. \n\nTo embed with Web Components:\n\n1. Import the gradio JS library into into your site by adding the script below in your site (replace {GRADIO_VERSION} in the URL with the library version of Gradio you are using). \n\n```html\n\n```\n\n2. Add \n```html\n\n```\n\nelement where you want to place the app. Set the `src=` attribute to your Space's embed URL, which you can find in the \"Embed this Space\" button. For example:\n\n\n```html\n\n```\n\n\n\nYou can see examples of how web components look on the Gradio landing page.\n\nYou can also customize the appearance and behavior of your web component with attributes that you pass into the `` tag:\n\n* `src`: as we've seen, the `src` attributes links to the URL of the hosted Gradio demo that you would like to embed\n* `space`: an optional shorthand if your Gradio demo is hosted on Hugging Face Space. Accepts a `username/space_name` instead of a full URL. Example: `gradio/Echocardiogram-Segmentation`. If this attribute attribute is provided, then `src` does not need to be provided.\n* `control_page_title`: a boolean designating whether the html title of the page should be set to the title of the Gradio app (by default `\"false\"`)\n* `initial_height`: the initial height of the web component while it is loading the Gradio app, (by default `\"300px\"`). Note that the final height is set based on the size of the Gradio app.\n* `container`: whether to show the border frame and information about where the Space is hosted (by default `\"true\"`)\n* `info`: whether to show just the information about where the Space is hosted underneath the embedded app (by default `\"true\"`)\n* `autoscroll`: whether to autoscroll to the output when prediction has finished (by default `\"false\"`)\n* `eager`: whether to load the Gradio app as soon as the page loads (by default `\"false\"`)\n* `theme_mode`: whether to use the `dark`, `light`, or default `system` theme mode (by default `\"system\"`)\n\nHere's an example of how to use these attributes to create a Gradio app that does not lazy load and has an initial height of 0px. \n\n```html\n\n```\n\n_Note: While Gradio's CSS will never impact the embedding page, the embedding page can affect the style of the embedded Gradio app. 
Make sure that any CSS in the parent page isn't so general that it could also apply to the embedded Gradio app and cause the styling to break. Element selectors such as `header { ... }` and `footer { ... }` will be the most likely to cause issues._\n\n### Embedding with IFrames\n\nTo embed with IFrames instead (if you cannot add javascript to your website, for example), add this element:\n\n```html\n\n```\n\nAgain, you can find the `src=` attribute to your Space's embed URL, which you can find in the \"Embed this Space\" button.\n\nNote: if you use IFrames, you'll probably want to add a fixed `height` attribute and set `style=\"border:0;\"` to remove the boreder. In addition, if your app requires permissions such as access to the webcam or the microphone, you'll need to provide that as well using the `allow` attribute.\n\n## API Page\n\nYou can use almost any Gradio app as an API! In the footer of a Gradio app [like this one](https://huggingface.co/spaces/gradio/hello_world), you'll see a \"Use via API\" link. \n\n![Use via API](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/gradio-guides/api3.gif)\n\nThis is a page that lists the endpoints that can be used to query the Gradio app, via our supported clients: either [the Python client](https://gradio.app/guides/getting-started-with-the-python-client/), or [the JavaScript client](https://gradio.app/guides/getting-started-with-the-js-client/). For each endpoint, Gradio automatically generates the parameters and their types, as well as example inputs.\n\nThe endpoints are automatically created when you launch a Gradio `Interface`. If you are using Gradio `Blocks`, you can also set up a Gradio API page, though we recommend that you explicitly name each event listener, such as\n\n```python\nbtn.click(add, [num1, num2], output, api_name=\"addition\")\n```\n\nThis will add and document the endpoint `/api/addition/` to the automatically generated API page. Otherwise, your API endpoints will appear as \"unnamed\" endpoints. \n\n*Note*: For Gradio apps in which [queueing is enabled](https://gradio.app/guides/key-features#queuing), users can bypass the queue if they make a POST request to your API endpoint. To disable this behavior, set `api_open=False` in the `queue()` method. To disable the API page altogether, set `show_api=False` in `.launch()`.\n\n## Authentication\n\nYou may wish to put an authentication page in front of your app to limit who can open your app. With the `auth=` keyword argument in the `launch()` method, you can provide a tuple with a username and password, or a list of acceptable username/password tuples; Here's an example that provides password-based authentication for a single user named \"admin\":\n\n```python\ndemo.launch(auth=(\"admin\", \"pass1234\"))\n```\n\nFor more complex authentication handling, you can even pass a function that takes a username and password as arguments, and returns True to allow authentication, False otherwise. 
This can be used for, among other things, making requests to 3rd-party authentication services.\n\nHere's an example of a function that accepts any login where the username and password are the same:\n\n```python\ndef same_auth(username, password):\n return username == password\ndemo.launch(auth=same_auth)\n```\n\nFor authentication to work properly, third party cookies must be enabled in your browser.\nThis is not the case by default for Safari, Chrome Incognito Mode.\n\n## Accessing the Network Request Directly\n\nWhen a user makes a prediction to your app, you may need the underlying network request, in order to get the request headers (e.g. for advanced authentication), log the client's IP address, or for other reasons. Gradio supports this in a similar manner to FastAPI: simply add a function parameter whose type hint is `gr.Request` and Gradio will pass in the network request as that parameter. Here is an example:\n\n```python\nimport gradio as gr\n\ndef echo(name, request: gr.Request):\n if request:\n print(\"Request headers dictionary:\", request.headers)\n print(\"IP address:\", request.client.host)\n return name\n\nio = gr.Interface(echo, \"textbox\", \"textbox\").launch()\n```\n\nNote: if your function is called directly instead of through the UI (this happens, for \nexample, when examples are cached), then `request` will be `None`. You should handle\nthis case explicitly to ensure that your app does not throw any errors. That is why\nwe have the explicit check `if request`.\n\n## Mounting Within Another FastAPI App\n\nIn some cases, you might have an existing FastAPI app, and you'd like to add a path for a Gradio demo.\nYou can easily do this with `gradio.mount_gradio_app()`.\n\nHere's a complete example:\n\n```python\nfrom fastapi import FastAPI\nimport gradio as gr\n\nCUSTOM_PATH = \"/gradio\"\n\napp = FastAPI()\n\n\n@app.get(\"/\")\ndef read_main():\n return {\"message\": \"This is your main app\"}\n\n\nio = gr.Interface(lambda x: \"Hello, \" + x + \"!\", \"textbox\", \"textbox\")\napp = gr.mount_gradio_app(app, io, path=CUSTOM_PATH)\n\n\n# Run this from the terminal as you would normally start a FastAPI app: `uvicorn run:app`\n# and navigate to http://localhost:8000/gradio in your browser.\n\n```\n\nNote that this approach also allows you run your Gradio apps on custom paths (`http://localhost:8000/gradio` in the example above).\n\n## Security and File Access\n\nSharing your Gradio app with others (by hosting it on Spaces, on your own server, or through temporary share links) **exposes** certain files on the host machine to users of your Gradio app. \n\nIn particular, Gradio apps ALLOW users to access to three kinds of files:\n\n* **Files in the same directory (or a subdirectory) of where the Gradio script is launched from.** For example, if the path to your gradio scripts is `/home/usr/scripts/project/app.py` and you launch it from `/home/usr/scripts/project/`, then users of your shared Gradio app will be able to access any files inside `/home/usr/scripts/project/`. This is done so that you can easily reference these files in your Gradio app (e.g. for your app's `examples`).\n\n* **Temporary files created by Gradio.** These are files that are created by Gradio as part of running your prediction function. For example, if your prediction function returns a video file, then Gradio will save that video to a temporary file and then send the path to the temporary file to the front end. 
You can customize the location of temporary files created by Gradio by setting the environment variable `GRADIO_TEMP_DIR` to an absolute path, such as `/home/usr/scripts/project/temp/`.\n\n* **Files that you explicitly allow via the `allowed_paths` parameter in `launch()`**. This parameter allows you to pass in a list of additional directories or exact filepaths you'd like to allow users to have access to. (By default, this parameter is an empty list).\n\nGradio DOES NOT ALLOW access to:\n\n* **Dotfiles** (any files whose name begins with `'.'`) or any files that are contained in any directory whose name begins with `'.'`\n\n* **Files that you explicitly block via the `blocked_paths` parameter in `launch()`**. You can pass in a list of additional directories or exact filepaths to the `blocked_paths` parameter in `launch()`. This parameter takes precedence over the files that Gradio exposes by default or via the `allowed_paths` parameter.\n\n* **Any other paths on the host machine**. Users should NOT be able to access other arbitrary paths on the host. \n\nPlease make sure you are running the latest version of `gradio` for these security settings to apply. ", - "html": "

Sharing Your App

\n\n

How to share your Gradio app:

\n\n
    \n
  1. Sharing demos with the share parameter
  2. \n
  3. Hosting on HF Spaces
  4. \n
  5. Embedding hosted spaces
  6. \n
  7. Embedding with web components
  8. \n
  9. Using the API page
  10. \n
  11. Adding authentication to the page
  12. \n
  13. Accessing Network Requests
  14. \n
  15. Mounting within FastAPI
  16. \n
  17. Security
  18. \n
\n\n

Sharing Demos

\n\n

Gradio demos can be easily shared publicly by setting share=True in the launch() method. Like this:

\n\n
demo.launch(share=True)\n
\n\n

This generates a public, shareable link that you can send to anybody! When you send this link, the user on the other side can try out the model in their browser. Because the processing happens on your device (as long as your device stays on!), you don't have to worry about packaging any dependencies. A share link usually looks something like this: XXXXX.gradio.app. Although the link is served through a Gradio URL, we are only a proxy for your local server, and do not store any data sent through your app.

\n\n

Keep in mind, however, that these links are publicly accessible, meaning that anyone can use your model for prediction! Therefore, make sure not to expose any sensitive information through the functions you write, or allow any critical changes to occur on your device. If you set share=False (the default, except in colab notebooks), only a local link is created, which can be shared by port-forwarding with specific users.

\n\n

\"sharing\"

\n\n

Share links expire after 72 hours.

\n\n

Hosting on HF Spaces

\n\n

If you'd like to have a permanent link to your Gradio demo on the internet, use Hugging Face Spaces. Hugging Face Spaces provides the infrastructure to permanently host your machine learning model for free!

\n\n

After you have created a free Hugging Face account, you have three methods to deploy your Gradio app to Hugging Face Spaces:

\n\n
    \n
  1. From terminal: run gradio deploy in your app directory. The CLI will gather some basic metadata and then launch your app. To update your space, you can re-run this command or enable the GitHub Actions option to automatically update the Space on git push.

  2. \n
  3. From your browser: Drag and drop a folder containing your Gradio model and all related files here.

  4. \n
  5. Connect Spaces with your Git repository and Spaces will pull the Gradio app from there. See this guide on how to host on Hugging Face Spaces for more information.

  6. \n
\n\n

\n\n

Note: Some components, like gr.Image, will display a \"Share\" button only on Spaces, so that users can share the generated output to the Discussions page of the Space easily. You can disable this with show_share_button, such as gr.Image(show_share_button=False).

\n\n

\"Imagesharebutton=True\" />

\n\n

Embedding Hosted Spaces

\n\n

Once you have hosted your app on Hugging Face Spaces (or on your own server), you may want to embed the demo on a different website, such as your blog or your portfolio. Embedding an interactive demo allows people to try out the machine learning model that you have built, without needing to download or install anything \u2014 right in their browser! The best part is that you can embed interactive demos even in static websites, such as GitHub pages.

\n\n

There are two ways to embed your Gradio demos. You can find quick links to both options directly on the Hugging Face Space page, in the \"Embed this Space\" dropdown option:

\n\n

\"Embed

\n\n

Embedding with Web Components

\n\n

Web components typically offer a better experience to users than IFrames. Web components load lazily, meaning that they won't slow down the loading time of your website, and they automatically adjust their height based on the size of the Gradio app.

\n\n

To embed with Web Components:

\n\n
    \n
  1. Import the gradio JS library into your site by adding the script below (replace {GRADIO_VERSION} in the URL with the library version of Gradio you are using).
  2. \n
\n\n
\n
\n\n
    \n
  1. Add
  2. \n
\n\n
\n
\n\n

element where you want to place the app. Set the src= attribute to your Space's embed URL, which you can find in the \"Embed this Space\" button. For example:

\n\n
\n
\n\n\n\n

You can see examples of how web components look on the Gradio landing page.

\n\n

You can also customize the appearance and behavior of your web component with attributes that you pass into the <gradio-app> tag:

\n\n
    \n
  • src: as we've seen, the src attribute links to the URL of the hosted Gradio demo that you would like to embed
  • \n
  • space: an optional shorthand if your Gradio demo is hosted on a Hugging Face Space. Accepts a username/space_name instead of a full URL. Example: gradio/Echocardiogram-Segmentation. If this attribute is provided, then src does not need to be provided.
  • \n
  • control_page_title: a boolean designating whether the html title of the page should be set to the title of the Gradio app (by default \"false\")
  • \n
  • initial_height: the initial height of the web component while it is loading the Gradio app (by default \"300px\"). Note that the final height is set based on the size of the Gradio app.
  • \n
  • container: whether to show the border frame and information about where the Space is hosted (by default \"true\")
  • \n
  • info: whether to show just the information about where the Space is hosted underneath the embedded app (by default \"true\")
  • \n
  • autoscroll: whether to autoscroll to the output when prediction has finished (by default \"false\")
  • \n
  • eager: whether to load the Gradio app as soon as the page loads (by default \"false\")
  • \n
  • theme_mode: whether to use the dark, light, or default system theme mode (by default \"system\")
  • \n
\n\n

Here's an example of how to use these attributes to create a Gradio app that does not lazy load and has an initial height of 0px.

\n\n
\n
\n\n

Note: While Gradio's CSS will never impact the embedding page, the embedding page can affect the style of the embedded Gradio app. Make sure that any CSS in the parent page isn't so general that it could also apply to the embedded Gradio app and cause the styling to break. Element selectors such as header { ... } and footer { ... } will be the most likely to cause issues.

\n\n

Embedding with IFrames

\n\n

To embed with IFrames instead (if you cannot add javascript to your website, for example), add this element:

\n\n
\n
\n\n

Again, set the src= attribute to your Space's embed URL, which you can find in the \"Embed this Space\" button.

\n\n

Note: if you use IFrames, you'll probably want to add a fixed height attribute and set style=\"border:0;\" to remove the border. In addition, if your app requires permissions such as access to the webcam or the microphone, you'll need to provide that as well using the allow attribute.

\n\n

API Page

\n\n

You can use almost any Gradio app as an API! In the footer of a Gradio app like this one, you'll see a \"Use via API\" link.

\n\n

\"Use

\n\n

This is a page that lists the endpoints that can be used to query the Gradio app, via our supported clients: either the Python client, or the JavaScript client. For each endpoint, Gradio automatically generates the parameters and their types, as well as example inputs.

\n\n

The endpoints are automatically created when you launch a Gradio Interface. If you are using Gradio Blocks, you can also set up a Gradio API page, though we recommend that you explicitly name each event listener, such as

\n\n
btn.click(add, [num1, num2], output, api_name=\"addition\")\n
\n\n

This will add and document the endpoint /api/addition/ to the automatically generated API page. Otherwise, your API endpoints will appear as \"unnamed\" endpoints.

\n\n
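
As a rough sketch (the URL and the argument values here are illustrative), a named endpoint like the one above could then be called from the Python client:

\n\n
from gradio_client import Client\n\n# connect to the running Gradio app (assumed here to be at the default local address)\nclient = Client(\"http://127.0.0.1:7860/\")\n# call the event listener that was registered with api_name=\"addition\"\nresult = client.predict(1, 2, api_name=\"/addition\")\nprint(result)\n
\n\n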

Note: For Gradio apps in which queueing is enabled, users can bypass the queue if they make a POST request to your API endpoint. To disable this behavior, set api_open=False in the queue() method. To disable the API page altogether, set show_api=False in .launch().

\n\n
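
For example (a minimal sketch, assuming a demo object as in the other snippets on this page), those two settings look like this:

\n\n
# keep the queue from being bypassed by direct POST requests\ndemo.queue(api_open=False)\n# hide the \"Use via API\" page entirely\ndemo.launch(show_api=False)\n
\n\n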

Authentication

\n\n

You may wish to put an authentication page in front of your app to limit who can open your app. With the auth= keyword argument in the launch() method, you can provide a tuple with a username and password, or a list of acceptable username/password tuples. Here's an example that provides password-based authentication for a single user named \"admin\":

\n\n
demo.launch(auth=(\"admin\", \"pass1234\"))\n
\n\n
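
To allow several accounts, you can instead pass a list of username/password tuples (the credentials below are placeholders):

\n\n
demo.launch(auth=[(\"admin\", \"pass1234\"), (\"guest\", \"guest\")])\n
\n\n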

For more complex authentication handling, you can even pass a function that takes a username and password as arguments, and returns True to allow authentication, False otherwise. This can be used for, among other things, making requests to 3rd-party authentication services.

\n\n

Here's an example of a function that accepts any login where the username and password are the same:

\n\n
def same_auth(username, password):\n    return username == password\ndemo.launch(auth=same_auth)\n
\n\n

For authentication to work properly, third-party cookies must be enabled in your browser.\nThis is not the case by default for Safari or for Chrome in Incognito Mode.

\n\n

Accessing the Network Request Directly

\n\n

When a user makes a prediction to your app, you may need the underlying network request, in order to get the request headers (e.g. for advanced authentication), log the client's IP address, or for other reasons. Gradio supports this in a similar manner to FastAPI: simply add a function parameter whose type hint is gr.Request and Gradio will pass in the network request as that parameter. Here is an example:

\n\n
import gradio as gr\n\ndef echo(name, request: gr.Request):\n    if request:\n        print(\"Request headers dictionary:\", request.headers)\n        print(\"IP address:\", request.client.host)\n    return name\n\nio = gr.Interface(echo, \"textbox\", \"textbox\").launch()\n
\n\n

Note: if your function is called directly instead of through the UI (this happens, for \nexample, when examples are cached), then request will be None. You should handle\nthis case explicitly to ensure that your app does not throw any errors. That is why\nwe have the explicit check if request.

\n\n

Mounting Within Another FastAPI App

\n\n

In some cases, you might have an existing FastAPI app, and you'd like to add a path for a Gradio demo.\nYou can easily do this with gradio.mount_gradio_app().

\n\n

Here's a complete example:

\n\n
from fastapi import FastAPI\nimport gradio as gr\n\nCUSTOM_PATH = \"/gradio\"\n\napp = FastAPI()\n\n\n@app.get(\"/\")\ndef read_main():\n    return {\"message\": \"This is your main app\"}\n\n\nio = gr.Interface(lambda x: \"Hello, \" + x + \"!\", \"textbox\", \"textbox\")\napp = gr.mount_gradio_app(app, io, path=CUSTOM_PATH)\n\n\n# Run this from the terminal as you would normally start a FastAPI app: `uvicorn run:app`\n# and navigate to http://localhost:8000/gradio in your browser.\n\n
\n\n

Note that this approach also allows you to run your Gradio apps on custom paths (http://localhost:8000/gradio in the example above).

\n\n

Security and File Access

\n\n

Sharing your Gradio app with others (by hosting it on Spaces, on your own server, or through temporary share links) exposes certain files on the host machine to users of your Gradio app.

\n\n

In particular, Gradio apps ALLOW users to access three kinds of files:

\n\n
    \n
  • Files in the same directory (or a subdirectory) of where the Gradio script is launched from. For example, if the path to your gradio scripts is /home/usr/scripts/project/app.py and you launch it from /home/usr/scripts/project/, then users of your shared Gradio app will be able to access any files inside /home/usr/scripts/project/. This is done so that you can easily reference these files in your Gradio app (e.g. for your app's examples).

  • \n
  • Temporary files created by Gradio. These are files that are created by Gradio as part of running your prediction function. For example, if your prediction function returns a video file, then Gradio will save that video to a temporary file and then send the path to the temporary file to the front end. You can customize the location of temporary files created by Gradio by setting the environment variable GRADIO_TEMP_DIR to an absolute path, such as /home/usr/scripts/project/temp/ (see the short example after this list).

  • \n
  • Files that you explicitly allow via the allowed_paths parameter in launch(). This parameter allows you to pass in a list of additional directories or exact filepaths you'd like to allow users to have access to. (By default, this parameter is an empty list).

  • \n
\n\n
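
As a short illustration of the temporary-files point above (the path is hypothetical, and you can equally export the variable in your shell instead), GRADIO_TEMP_DIR can be set before Gradio is imported:

\n\n
import os\n\n# set before importing gradio so the custom temp dir is picked up (illustrative path)\nos.environ[\"GRADIO_TEMP_DIR\"] = \"/home/usr/scripts/project/temp\"\n\nimport gradio as gr\n
\n\n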

Gradio DOES NOT ALLOW access to:

\n\n
    \n
  • Dotfiles (any files whose name begins with '.') or any files that are contained in any directory whose name begins with '.'

  • \n
  • Files that you explicitly block via the blocked_paths parameter in launch(). You can pass in a list of additional directories or exact filepaths to the blocked_paths parameter in launch(). This parameter takes precedence over the files that Gradio exposes by default or via the allowed_paths parameter (see the example below).

  • \n
  • Any other paths on the host machine. Users should NOT be able to access other arbitrary paths on the host.

  • \n
\n\n
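
Putting allowed_paths and blocked_paths together, here is a minimal sketch (the paths are hypothetical and must be absolute):

\n\n
demo.launch(\n    # expose an extra directory to users of the app\n    allowed_paths=[\"/home/usr/data\"],\n    # but never serve anything under this subdirectory\n    blocked_paths=[\"/home/usr/data/private\"],\n)\n
\n\n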

Please make sure you are running the latest version of gradio for these security settings to apply.

\n", - "tags": [], - "spaces": [], - "url": "/guides/sharing-your-app/", - "contributor": null - } - ], - "parent": "gradio" - }, - "interface": { - "class": null, - "name": "Interface", - "description": "Interface is Gradio's main high-level class, and allows you to create a web-based GUI / demo around a machine learning model (or any Python function) in a few lines of code. You must specify three parameters: (1) the function to create a GUI for (2) the desired input components and (3) the desired output components. Additional parameters can be used to control the appearance and behavior of the demo.
", - "tags": { - "demos": "hello_world, hello_world_3, gpt2_xl", - "guides": "quickstart, key-features, sharing-your-app, interface-state, reactive-interfaces, advanced-interface-features, setting-up-a-gradio-demo-for-maximum-performance" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "fn", - "annotation": "Callable", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "str | IOComponent | list[str | IOComponent] | None", - "doc": "a single Gradio component, or list of Gradio components. Components can either be passed as instantiated objects, or referred to by their string shortcuts. The number of input components should match the number of parameters in fn. If set to None, then only the output components will be displayed." - }, - { - "name": "outputs", - "annotation": "str | IOComponent | list[str | IOComponent] | None", - "doc": "a single Gradio component, or list of Gradio components. Components can either be passed as instantiated objects, or referred to by their string shortcuts. The number of output components should match the number of values returned by fn. If set to None, then only the input components will be displayed." - }, - { - "name": "examples", - "annotation": "list[Any] | list[list[Any]] | str | None", - "doc": "sample inputs for the function; if provided, appear below the UI components and can be clicked to populate the interface. Should be nested list, in which the outer list consists of samples and each inner list consists of an input corresponding to each input component. A string path to a directory of examples can also be provided, but it should be within the directory with the python file running the gradio app. If there are multiple input components and a directory is provided, a log.csv file must be present in the directory to link corresponding inputs.", - "default": "None" - }, - { - "name": "cache_examples", - "annotation": "bool | None", - "doc": "If True, caches examples in the server for fast runtime in examples. If `fn` is a generator function, then the last yielded value will be used as the output. The default option in HuggingFace Spaces is True. The default option elsewhere is False.", - "default": "None" - }, - { - "name": "examples_per_page", - "annotation": "int", - "doc": "If examples are provided, how many to display per page.", - "default": "10" - }, - { - "name": "live", - "annotation": "bool", - "doc": "whether the interface should automatically rerun if any of the inputs change.", - "default": "False" - }, - { - "name": "interpretation", - "annotation": "Callable | str | None", - "doc": "function that provides interpretation explaining prediction output. Pass \"default\" to use simple built-in interpreter, \"shap\" to use a built-in shapley-based interpreter, or your own custom interpretation function. For more information on the different interpretation methods, see the Advanced Interface Features guide.", - "default": "None" - }, - { - "name": "num_shap", - "annotation": "float", - "doc": "a multiplier that determines how many examples are computed for shap-based interpretation. Increasing this value will increase shap runtime, but improve results. 
Only applies if interpretation is \"shap\".", - "default": "2.0" - }, - { - "name": "title", - "annotation": "str | None", - "doc": "a title for the interface; if provided, appears above the input and output components in large font. Also used as the tab title when opened in a browser window.", - "default": "None" - }, - { - "name": "description", - "annotation": "str | None", - "doc": "a description for the interface; if provided, appears above the input and output components and beneath the title in regular font. Accepts Markdown and HTML content.", - "default": "None" - }, - { - "name": "article", - "annotation": "str | None", - "doc": "an expanded article explaining the interface; if provided, appears below the input and output components in regular font. Accepts Markdown and HTML content.", - "default": "None" - }, - { - "name": "thumbnail", - "annotation": "str | None", - "doc": "path or url to image to use as display image when the web demo is shared on social media.", - "default": "None" - }, - { - "name": "theme", - "annotation": "Theme | str | None", - "doc": "Theme to use, loaded from gradio.themes.", - "default": "None" - }, - { - "name": "css", - "annotation": "str | None", - "doc": "custom css or path to custom css file to use with interface.", - "default": "None" - }, - { - "name": "allow_flagging", - "annotation": "str | None", - "doc": "one of \"never\", \"auto\", or \"manual\". If \"never\" or \"auto\", users will not see a button to flag an input and output. If \"manual\", users will see a button to flag. If \"auto\", every input the user submits will be automatically flagged (outputs are not flagged). If \"manual\", both the input and outputs are flagged when the user clicks flag button. This parameter can be set with environmental variable GRADIO_ALLOW_FLAGGING; otherwise defaults to \"manual\".", - "default": "None" - }, - { - "name": "flagging_options", - "annotation": "list[str] | list[tuple[str, str]] | None", - "doc": "if provided, allows user to select from the list of options when flagging. Only applies if allow_flagging is \"manual\". Can either be a list of tuples of the form (label, value), where label is the string that will be displayed on the button and value is the string that will be stored in the flagging CSV; or it can be a list of strings [\"X\", \"Y\"], in which case the values will be the list of strings and the labels will [\"Flag as X\", \"Flag as Y\"], etc.", - "default": "None" - }, - { - "name": "flagging_dir", - "annotation": "str", - "doc": "what to name the directory where flagged data is stored.", - "default": "\"flagged\"" - }, - { - "name": "flagging_callback", - "annotation": "FlaggingCallback", - "doc": "An instance of a subclass of FlaggingCallback which will be called when a sample is flagged. By default logs to a local CSV file.", - "default": "CSVLogger()" - }, - { - "name": "analytics_enabled", - "annotation": "bool | None", - "doc": "Whether to allow basic telemetry. If None, will use GRADIO_ANALYTICS_ENABLED environment variable if defined, or default to True.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "allow_duplication", - "annotation": "bool", - "doc": "If True, then will show a 'Duplicate Spaces' button on Hugging Face Spaces.", - "default": "False" - } - ], - "returns": { "annotation": null }, - "example": "import gradio as gr\n\ndef image_classifier(inp):\n return {'cat': 0.3, 'dog': 0.7}\n\ndemo = gr.Interface(fn=image_classifier, inputs=\"image\", outputs=\"label\")\ndemo.launch()", - "fns": [ - { - "fn": null, - "name": "launch", - "description": "Launches a simple web server that serves the demo. Can also be used to create a public link used by anyone to access the demo from their browser by setting share=True.
", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "inline", - "annotation": "bool | None", - "doc": "whether to display in the interface inline in an iframe. Defaults to True in python notebooks; False otherwise.", - "default": "None" - }, - { - "name": "inbrowser", - "annotation": "bool", - "doc": "whether to automatically launch the interface in a new tab on the default browser.", - "default": "False" - }, - { - "name": "share", - "annotation": "bool | None", - "doc": "whether to create a publicly shareable link for the interface. Creates an SSH tunnel to make your UI accessible from anywhere. If not provided, it is set to False by default every time, except when running in Google Colab. When localhost is not accessible (e.g. Google Colab), setting share=False is not supported.", - "default": "None" - }, - { - "name": "debug", - "annotation": "bool", - "doc": "if True, blocks the main thread from running. If running in Google Colab, this is needed to print the errors in the cell output.", - "default": "False" - }, - { - "name": "enable_queue", - "annotation": "bool | None", - "doc": "DEPRECATED (use .queue() method instead.) if True, inference requests will be served through a queue instead of with parallel threads. Required for longer inference times (> 1min) to prevent timeout. The default option in HuggingFace Spaces is True. The default option elsewhere is False.", - "default": "None" - }, - { - "name": "max_threads", - "annotation": "int", - "doc": "the maximum number of total threads that the Gradio app can generate in parallel. The default is inherited from the starlette library (currently 40). Applies whether the queue is enabled or not. But if queuing is enabled, this parameter is increaseed to be at least the concurrency_count of the queue.", - "default": "40" - }, - { - "name": "auth", - "annotation": "Callable | tuple[str, str] | list[tuple[str, str]] | None", - "doc": "If provided, username and password (or list of username-password tuples) required to access interface. Can also provide function that takes username and password and returns True if valid login.", - "default": "None" - }, - { - "name": "auth_message", - "annotation": "str | None", - "doc": "If provided, HTML message provided on login page.", - "default": "None" - }, - { - "name": "prevent_thread_lock", - "annotation": "bool", - "doc": "If True, the interface will block the main thread while the server is running.", - "default": "False" - }, - { - "name": "show_error", - "annotation": "bool", - "doc": "If True, any errors in the interface will be displayed in an alert modal and printed in the browser console log", - "default": "False" - }, - { - "name": "server_name", - "annotation": "str | None", - "doc": "to make app accessible on local network, set this to \"0.0.0.0\". Can be set by environment variable GRADIO_SERVER_NAME. If None, will use \"127.0.0.1\".", - "default": "None" - }, - { - "name": "server_port", - "annotation": "int | None", - "doc": "will start gradio app on this port (if available). Can be set by environment variable GRADIO_SERVER_PORT. 
If None, will search for an available port starting at 7860.", - "default": "None" - }, - { - "name": "show_tips", - "annotation": "bool", - "doc": "if True, will occasionally show tips about new Gradio features", - "default": "False" - }, - { - "name": "height", - "annotation": "int", - "doc": "The height in pixels of the iframe element containing the interface (used if inline=True)", - "default": "500" - }, - { - "name": "width", - "annotation": "int | str", - "doc": "The width in pixels of the iframe element containing the interface (used if inline=True)", - "default": "\"100%\"" - }, - { - "name": "encrypt", - "annotation": "bool | None", - "doc": "DEPRECATED. Has no effect.", - "default": "None" - }, - { - "name": "favicon_path", - "annotation": "str | None", - "doc": "If a path to a file (.png, .gif, or .ico) is provided, it will be used as the favicon for the web page.", - "default": "None" - }, - { - "name": "ssl_keyfile", - "annotation": "str | None", - "doc": "If a path to a file is provided, will use this as the private key file to create a local server running on https.", - "default": "None" - }, - { - "name": "ssl_certfile", - "annotation": "str | None", - "doc": "If a path to a file is provided, will use this as the signed certificate for https. Needs to be provided if ssl_keyfile is provided.", - "default": "None" - }, - { - "name": "ssl_keyfile_password", - "annotation": "str | None", - "doc": "If a password is provided, will use this with the ssl certificate for https.", - "default": "None" - }, - { - "name": "ssl_verify", - "annotation": "bool", - "doc": "If False, skips certificate validation which allows self-signed certificates to be used.", - "default": "True" - }, - { - "name": "quiet", - "annotation": "bool", - "doc": "If True, suppresses most print statements.", - "default": "False" - }, - { - "name": "show_api", - "annotation": "bool", - "doc": "If True, shows the api docs in the footer of the app. Default True. If the queue is enabled, then api_open parameter of .queue() will determine if the api docs are shown, independent of the value of show_api.", - "default": "True" - }, - { - "name": "file_directories", - "annotation": "list[str] | None", - "doc": "This parameter has been renamed to `allowed_paths`. It will be removed in a future version.", - "default": "None" - }, - { - "name": "allowed_paths", - "annotation": "list[str] | None", - "doc": "List of complete filepaths or parent directories that gradio is allowed to serve (in addition to the directory containing the gradio python file). Must be absolute paths. Warning: if you provide directories, any files in these directories or their subdirectories are accessible to all users of your app.", - "default": "None" - }, - { - "name": "blocked_paths", - "annotation": "list[str] | None", - "doc": "List of complete filepaths or parent directories that gradio is not allowed to serve (i.e. users of your app are not allowed to access). Must be absolute paths. Warning: takes precedence over `allowed_paths` and all other directories exposed by Gradio by default.", - "default": "None" - }, - { - "name": "root_path", - "annotation": "str", - "doc": "The root path (or \"mount point\") of the application, if it's not served from the root (\"/\") of the domain. Often used when the application is behind a reverse proxy that forwards requests to the application. 
For example, if the application is served at \"https://example.com/myapp\", the `root_path` should be set to \"/myapp\".", - "default": "\"\"" - }, - { - "name": "app_kwargs", - "annotation": "dict[str, Any] | None", - "doc": "Additional keyword arguments to pass to the underlying FastAPI app as a dictionary of parameter keys and argument values. For example, `{\"docs_url\": \"/docs\"}`", - "default": "None" - } - ], - "returns": {}, - "example": "import gradio as gr\ndef reverse(text):\n return text[::-1]\ndemo = gr.Interface(reverse, \"text\", \"text\")\ndemo.launch(share=True, auth=(\"username\", \"password\"))", - "override_signature": null, - "parent": "gradio.Interface" - }, - { - "fn": null, - "name": "load", - "description": "Warning: this method will be deprecated. Use the equivalent `gradio.load()` instead. This is a class method that constructs a Blocks from a Hugging Face repo. Can accept model repos (if src is \"models\") or Space repos (if src is \"spaces\"). The input and output components are automatically loaded from the repo.", - "tags": {}, - "parameters": [ - { - "name": "name", - "annotation": "str", - "doc": "the name of the model (e.g. \"gpt2\" or \"facebook/bart-base\") or space (e.g. \"flax-community/spanish-gpt2\"), can include the `src` as prefix (e.g. \"models/facebook/bart-base\")" - }, - { - "name": "src", - "annotation": "str | None", - "doc": "the source of the model: `models` or `spaces` (or leave empty if source is provided as a prefix in `name`)", - "default": "None" - }, - { - "name": "api_key", - "annotation": "str | None", - "doc": "optional access token for loading private Hugging Face Hub models or spaces. Find your token here: https://huggingface.co/settings/tokens. Warning: only provide this if you are loading a trusted private Space as it can be read by the Space you are loading.", - "default": "None" - }, - { - "name": "alias", - "annotation": "str | None", - "doc": "optional string used as the name of the loaded model instead of the default name (only applies if loading a Space running Gradio 2.x)", - "default": "None" - } - ], - "returns": { - "annotation": "Blocks", - "doc": "a Gradio Interface object for the given model" - }, - "example": null, - "override_signature": null, - "parent": "gradio.Interface" - }, - { - "fn": null, - "name": "from_pipeline", - "description": "Class method that constructs an Interface from a Hugging Face transformers.Pipeline object. The input and output components are automatically determined from the pipeline.", - "tags": {}, - "parameters": [ - { - "name": "pipeline", - "annotation": "Pipeline", - "doc": "the pipeline object to use." - } - ], - "returns": { - "annotation": "Interface", - "doc": "a Gradio Interface object from the given Pipeline" - }, - "example": "import gradio as gr\nfrom transformers import pipeline\npipe = pipeline(\"image-classification\")\ngr.Interface.from_pipeline(pipe).launch()", - "override_signature": null, - "parent": "gradio.Interface" - }, - { - "fn": null, - "name": "integrate", - "description": "A catch-all method for integrating with other libraries. 
This method should be run after launch()", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "comet_ml", - "annotation": "", - "doc": "If a comet_ml Experiment object is provided, will integrate with the experiment and appear on Comet dashboard", - "default": "None" - }, - { - "name": "wandb", - "annotation": "ModuleType | None", - "doc": "If the wandb module is provided, will integrate with it and appear on WandB dashboard", - "default": "None" - }, - { - "name": "mlflow", - "annotation": "ModuleType | None", - "doc": "If the mlflow module is provided, will integrate with the experiment and appear on ML Flow dashboard", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Interface" - }, - { - "fn": null, - "name": "queue", - "description": "You can control the rate of processed requests by creating a queue. This will allow you to set the number of requests to be processed at one time, and will let users know their position in the queue.", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "concurrency_count", - "annotation": "int", - "doc": "Number of worker threads that will be processing requests from the queue concurrently. Increasing this number will increase the rate at which requests are processed, but will also increase the memory usage of the queue.", - "default": "1" - }, - { - "name": "status_update_rate", - "annotation": "float | Literal['auto']", - "doc": "If \"auto\", Queue will send status estimations to all clients whenever a job is finished. Otherwise Queue will send status at regular intervals set by this parameter as the number of seconds.", - "default": "\"auto\"" - }, - { - "name": "client_position_to_load_data", - "annotation": "int | None", - "doc": "DEPRECATED. This parameter is deprecated and has no effect.", - "default": "None" - }, - { - "name": "default_enabled", - "annotation": "bool | None", - "doc": "Deprecated and has no effect.", - "default": "None" - }, - { - "name": "api_open", - "annotation": "bool", - "doc": "If True, the REST routes of the backend will be open, allowing requests made directly to those endpoints to skip the queue.", - "default": "True" - }, - { - "name": "max_size", - "annotation": "int | None", - "doc": "The maximum number of events the queue will store at any given moment. If the queue is full, new events will not be added and a user will receive a message saying that the queue is full. If None, the queue size will be unlimited.", - "default": "None" - } - ], - "returns": {}, - "example": "demo = gr.Interface(image_generator, gr.Textbox(), gr.Image())\ndemo.queue(concurrency_count=3)\ndemo.launch()", - "override_signature": null, - "parent": "gradio.Interface" - } - ], - "demos": [ - [ - "hello_world", - "import gradio as gr\n\ndef greet(name):\n return \"Hello \" + name + \"!\"\n\ndemo = gr.Interface(fn=greet, inputs=\"text\", outputs=\"text\")\n \nif __name__ == \"__main__\":\n demo.launch() " - ], - [ - "hello_world_3", - "import gradio as gr\n\ndef greet(name, is_morning, temperature):\n salutation = \"Good morning\" if is_morning else \"Good evening\"\n greeting = f\"{salutation} {name}. 
It is {temperature} degrees today\"\n celsius = (temperature - 32) * 5 / 9\n return greeting, round(celsius, 2)\n\ndemo = gr.Interface(\n fn=greet,\n inputs=[\"text\", \"checkbox\", gr.Slider(0, 100)],\n outputs=[\"text\", \"number\"],\n)\nif __name__ == \"__main__\":\n demo.launch()\n" - ], - [ - "gpt2_xl", - "import gradio as gr\n\ntitle = \"gpt2-xl\"\n\nexamples = [\n [\"The tower is 324 metres (1,063 ft) tall,\"],\n [\"The Moon's orbit around Earth has\"],\n [\"The smooth Borealis basin in the Northern Hemisphere covers 40%\"],\n]\n\ndemo = gr.load(\n \"huggingface/gpt2-xl\",\n inputs=gr.Textbox(lines=5, max_lines=6, label=\"Input Text\"),\n title=title,\n examples=examples,\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ] - ], - "guides": [ - { - "name": "quickstart", - "category": "getting-started", - "pretty_category": "Getting Started", - "guide_index": 1, - "absolute_index": 0, - "pretty_name": "Quickstart", - "content": "# Quickstart\n\n**Prerequisite**: Gradio requires Python 3.8 or higher, that's all!\n\n## What Does Gradio Do?\n\nOne of the *best ways to share* your machine learning model, API, or data science workflow with others is to create an **interactive app** that allows your users or colleagues to try out the demo in their browsers.\n\nGradio allows you to **build demos and share them, all in Python.** And usually in just a few lines of code! So let's get started.\n\n## Hello, World\n\nTo get Gradio running with a simple \"Hello, World\" example, follow these three steps:\n\n1\\. Install Gradio using pip:\n\n```bash\npip install gradio\n```\n\n2\\. Run the code below as a Python script or in a Jupyter Notebook (or [Google Colab](https://colab.research.google.com/drive/18ODkJvyxHutTN0P5APWyGFO_xwNcgHDZ?usp=sharing)):\n\n```python\nimport gradio as gr\n\ndef greet(name):\n return \"Hello \" + name + \"!\"\n\ndemo = gr.Interface(fn=greet, inputs=\"text\", outputs=\"text\")\n \ndemo.launch() \n```\n\nWe shorten the imported name to `gr` for better readability of code using Gradio. This is a widely adopted convention that you should follow so that anyone working with your code can easily understand it.\n\n3\\. The demo below will appear automatically within the Jupyter Notebook, or pop in a browser on [http://localhost:7860](http://localhost:7860) if running from a script:\n\n\n\nWhen developing locally, if you want to run the code as a Python script, you can use the Gradio CLI to launch the application **in reload mode**, which will provide seamless and fast development. Learn more about reloading in the [Auto-Reloading Guide](https://gradio.app/developing-faster-with-reload-mode/).\n\n```bash\ngradio app.py\n```\n\nNote: you can also do `python app.py`, but it won't provide the automatic reload mechanism.\n\n## The `Interface` Class\n\nYou'll notice that in order to make the demo, we created a `gr.Interface`. This `Interface` class can wrap any Python function with a user interface. In the example above, we saw a simple text-based function, but the function could be anything from music generator to a tax calculator to the prediction function of a pretrained machine learning model.\n\nThe core `Interface` class is initialized with three required parameters:\n\n- `fn`: the function to wrap a UI around\n- `inputs`: which component(s) to use for the input (e.g. `\"text\"`, `\"image\"` or `\"audio\"`)\n- `outputs`: which component(s) to use for the output (e.g. 
`\"text\"`, `\"image\"` or `\"label\"`)\n\nLet's take a closer look at these components used to provide input and output.\n\n## Components Attributes\n\nWe saw some simple `Textbox` components in the previous examples, but what if you want to change how the UI components look or behave?\n\nLet's say you want to customize the input text field \u2014 for example, you wanted it to be larger and have a text placeholder. If we use the actual class for `Textbox` instead of using the string shortcut, you have access to much more customizability through component attributes.\n\n```python\nimport gradio as gr\n\ndef greet(name):\n return \"Hello \" + name + \"!\"\n\ndemo = gr.Interface(\n fn=greet,\n inputs=gr.Textbox(lines=2, placeholder=\"Name Here...\"),\n outputs=\"text\",\n)\ndemo.launch()\n\n```\n\n\n## Multiple Input and Output Components\n\nSuppose you had a more complex function, with multiple inputs and outputs. In the example below, we define a function that takes a string, boolean, and number, and returns a string and number. Take a look how you pass a list of input and output components.\n\n```python\nimport gradio as gr\n\ndef greet(name, is_morning, temperature):\n salutation = \"Good morning\" if is_morning else \"Good evening\"\n greeting = f\"{salutation} {name}. It is {temperature} degrees today\"\n celsius = (temperature - 32) * 5 / 9\n return greeting, round(celsius, 2)\n\ndemo = gr.Interface(\n fn=greet,\n inputs=[\"text\", \"checkbox\", gr.Slider(0, 100)],\n outputs=[\"text\", \"number\"],\n)\ndemo.launch()\n\n```\n\n\nYou simply wrap the components in a list. Each component in the `inputs` list corresponds to one of the parameters of the function, in order. Each component in the `outputs` list corresponds to one of the values returned by the function, again in order.\n\n## An Image Example\n\nGradio supports many types of components, such as `Image`, `DataFrame`, `Video`, or `Label`. Let's try an image-to-image function to get a feel for these!\n\n```python\nimport numpy as np\nimport gradio as gr\n\ndef sepia(input_img):\n sepia_filter = np.array([\n [0.393, 0.769, 0.189], \n [0.349, 0.686, 0.168], \n [0.272, 0.534, 0.131]\n ])\n sepia_img = input_img.dot(sepia_filter.T)\n sepia_img /= sepia_img.max()\n return sepia_img\n\ndemo = gr.Interface(sepia, gr.Image(shape=(200, 200)), \"image\")\ndemo.launch()\n\n```\n\n\nWhen using the `Image` component as input, your function will receive a NumPy array with the shape `(height, width, 3)`, where the last dimension represents the RGB values. We'll return an image as well in the form of a NumPy array.\n\nYou can also set the datatype used by the component with the `type=` keyword argument. For example, if you wanted your function to take a file path to an image instead of a NumPy array, the input `Image` component could be written as:\n\n```python\ngr.Image(type=\"filepath\", shape=...)\n```\n\nAlso note that our input `Image` component comes with an edit button \ud83d\udd89, which allows for cropping and zooming into images. Manipulating images in this way can help reveal biases or hidden flaws in a machine learning model!\n\nYou can read more about the many components and how to use them in the [Gradio docs](https://gradio.app/docs).\n\n## Chatbots\n\nGradio includes a high-level class, `gr.ChatInterface`, which is similar to `gr.Interface`, but is specifically designed for chatbot UIs. The `gr.ChatInterface` class also wraps a function but this function must have a specific signature. 
The function should take two arguments: `message` and then `history` (the arguments can be named anything, but must be in this order)\n\n* `message`: a `str` representing the user's input\n* `history`: a `list` of `list` representing the conversations up until that point. Each inner list consists of two `str` representing a pair: `[user input, bot response]`. \n\nYour function should return a single string response, which is the bot's response to the particular user input `message`.\n\nOther than that, `gr.ChatInterface` has no required parameters (though several are available for customization of the UI).\n\nHere's a toy example:\n\n```python\nimport random\nimport gradio as gr\n\ndef random_response(message, history):\n return random.choice([\"Yes\", \"No\"])\n\ndemo = gr.ChatInterface(random_response)\n\ndemo.launch()\n\n```\n\n\nYou can [read more about `gr.ChatInterface` here](https://gradio.app/guides/creating-a-chatbot-fast).\n\n## Blocks: More Flexibility and Control\n\nGradio offers two approaches to build apps:\n\n1\\. **Interface** and **ChatInterface**, which provide a high-level abstraction for creating demos that we've been discussing so far.\n\n2\\. **Blocks**, a low-level API for designing web apps with more flexible layouts and data flows. Blocks allows you to do things like feature multiple data flows and demos, control where components appear on the page, handle complex data flows (e.g. outputs can serve as inputs to other functions), and update properties/visibility of components based on user interaction \u2014 still all in Python. If this customizability is what you need, try `Blocks` instead!\n\n## Hello, Blocks\n\nLet's take a look at a simple example. Note how the API here differs from `Interface`.\n\n```python\nimport gradio as gr\n\ndef greet(name):\n return \"Hello \" + name + \"!\"\n\nwith gr.Blocks() as demo:\n name = gr.Textbox(label=\"Name\")\n output = gr.Textbox(label=\"Output Box\")\n greet_btn = gr.Button(\"Greet\")\n greet_btn.click(fn=greet, inputs=name, outputs=output, api_name=\"greet\")\n \n\ndemo.launch()\n```\n\n\nThings to note:\n\n- `Blocks` are made with a `with` clause, and any component created inside this clause is automatically added to the app.\n- Components appear vertically in the app in the order they are created. (Later we will cover customizing layouts!)\n- A `Button` was created, and then a `click` event-listener was added to this button. The API for this should look familiar! Like an `Interface`, the `click` method takes a Python function, input components, and output components.\n\n## More Complexity\n\nHere's an app to give you a taste of what's possible with `Blocks`:\n\n```python\nimport numpy as np\nimport gradio as gr\n\n\ndef flip_text(x):\n return x[::-1]\n\n\ndef flip_image(x):\n return np.fliplr(x)\n\n\nwith gr.Blocks() as demo:\n gr.Markdown(\"Flip text or image files using this demo.\")\n with gr.Tab(\"Flip Text\"):\n text_input = gr.Textbox()\n text_output = gr.Textbox()\n text_button = gr.Button(\"Flip\")\n with gr.Tab(\"Flip Image\"):\n with gr.Row():\n image_input = gr.Image()\n image_output = gr.Image()\n image_button = gr.Button(\"Flip\")\n\n with gr.Accordion(\"Open for More!\"):\n gr.Markdown(\"Look at me...\")\n\n text_button.click(flip_text, inputs=text_input, outputs=text_output)\n image_button.click(flip_image, inputs=image_input, outputs=image_output)\n\ndemo.launch()\n\n```\n\n\nA lot more going on here! 
We'll cover how to create complex `Blocks` apps like this in the [building with blocks](https://gradio.app/building_with_blocks) section for you.\n\nCongrats, you're now familiar with the basics of Gradio! \ud83e\udd73 Go to our [next guide](https://gradio.app/key_features) to learn more about the key features of Gradio.\n", - "html": "

Quickstart

\n\n

Prerequisite: Gradio requires Python 3.8 or higher, that's all!

\n\n

What Does Gradio Do?

\n\n

One of the best ways to share your machine learning model, API, or data science workflow with others is to create an interactive app that allows your users or colleagues to try out the demo in their browsers.

\n\n

Gradio allows you to build demos and share them, all in Python. And usually in just a few lines of code! So let's get started.

\n\n

Hello, World

\n\n

To get Gradio running with a simple \"Hello, World\" example, follow these three steps:

\n\n

1. Install Gradio using pip:

\n\n
pip install gradio\n
\n\n

2. Run the code below as a Python script or in a Jupyter Notebook (or Google Colab):

\n\n
import gradio as gr\n\ndef greet(name):\n    return \"Hello \" + name + \"!\"\n\ndemo = gr.Interface(fn=greet, inputs=\"text\", outputs=\"text\")\n\ndemo.launch()   \n
\n\n

We shorten the imported name to gr for better readability of code using Gradio. This is a widely adopted convention that you should follow so that anyone working with your code can easily understand it.

\n\n

3. The demo below will appear automatically within the Jupyter Notebook, or pop in a browser on http://localhost:7860 if running from a script:

\n\n

\n\n

When developing locally, if you want to run the code as a Python script, you can use the Gradio CLI to launch the application in reload mode, which will provide seamless and fast development. Learn more about reloading in the Auto-Reloading Guide.

\n\n
gradio app.py\n
\n\n

Note: you can also do python app.py, but it won't provide the automatic reload mechanism.

\n\n

The Interface Class

\n\n

You'll notice that in order to make the demo, we created a gr.Interface. This Interface class can wrap any Python function with a user interface. In the example above, we saw a simple text-based function, but the function could be anything from a music generator to a tax calculator to the prediction function of a pretrained machine learning model.

\n\n

The core Interface class is initialized with three required parameters:

\n\n
    \n
  • fn: the function to wrap a UI around
  • \n
  • inputs: which component(s) to use for the input (e.g. \"text\", \"image\" or \"audio\")
  • \n
  • outputs: which component(s) to use for the output (e.g. \"text\", \"image\" or \"label\")
  • \n
\n\n

Let's take a closer look at these components used to provide input and output.

\n\n

Components Attributes

\n\n

We saw some simple Textbox components in the previous examples, but what if you want to change how the UI components look or behave?

\n\n

Let's say you want to customize the input text field \u2014 for example, you wanted it to be larger and have a text placeholder. If we use the actual class for Textbox instead of using the string shortcut, you have access to much more customizability through component attributes.

\n\n
import gradio as gr\n\ndef greet(name):\n    return \"Hello \" + name + \"!\"\n\ndemo = gr.Interface(\n    fn=greet,\n    inputs=gr.Textbox(lines=2, placeholder=\"Name Here...\"),\n    outputs=\"text\",\n)\ndemo.launch()\n\n
\n\n

\n\n

Multiple Input and Output Components

\n\n

Suppose you had a more complex function, with multiple inputs and outputs. In the example below, we define a function that takes a string, boolean, and number, and returns a string and number. Take a look at how you pass a list of input and output components.

\n\n
import gradio as gr\n\ndef greet(name, is_morning, temperature):\n    salutation = \"Good morning\" if is_morning else \"Good evening\"\n    greeting = f\"{salutation} {name}. It is {temperature} degrees today\"\n    celsius = (temperature - 32) * 5 / 9\n    return greeting, round(celsius, 2)\n\ndemo = gr.Interface(\n    fn=greet,\n    inputs=[\"text\", \"checkbox\", gr.Slider(0, 100)],\n    outputs=[\"text\", \"number\"],\n)\ndemo.launch()\n\n
\n\n

\n\n

You simply wrap the components in a list. Each component in the inputs list corresponds to one of the parameters of the function, in order. Each component in the outputs list corresponds to one of the values returned by the function, again in order.

\n\n

An Image Example

\n\n

Gradio supports many types of components, such as Image, DataFrame, Video, or Label. Let's try an image-to-image function to get a feel for these!

\n\n
import numpy as np\nimport gradio as gr\n\ndef sepia(input_img):\n    sepia_filter = np.array([\n        [0.393, 0.769, 0.189], \n        [0.349, 0.686, 0.168], \n        [0.272, 0.534, 0.131]\n    ])\n    sepia_img = input_img.dot(sepia_filter.T)\n    sepia_img /= sepia_img.max()\n    return sepia_img\n\ndemo = gr.Interface(sepia, gr.Image(shape=(200, 200)), \"image\")\ndemo.launch()\n\n
\n\n

\n\n

When using the Image component as input, your function will receive a NumPy array with the shape (height, width, 3), where the last dimension represents the RGB values. We'll return an image as well in the form of a NumPy array.

\n\n

You can also set the datatype used by the component with the type= keyword argument. For example, if you wanted your function to take a file path to an image instead of a NumPy array, the input Image component could be written as:

\n\n
gr.Image(type=\"filepath\", shape=...)\n
\n\n

Also note that our input Image component comes with an edit button \ud83d\udd89, which allows for cropping and zooming into images. Manipulating images in this way can help reveal biases or hidden flaws in a machine learning model!

\n\n

You can read more about the many components and how to use them in the Gradio docs.

\n\n

Chatbots

\n\n

Gradio includes a high-level class, gr.ChatInterface, which is similar to gr.Interface, but is specifically designed for chatbot UIs. The gr.ChatInterface class also wraps a function, but this function must have a specific signature. The function should take two arguments: message and then history (the arguments can be named anything, but must be in this order):

\n\n
    \n
  • message: a str representing the user's input
  • \n
  • history: a list of lists representing the conversation up until that point. Each inner list consists of two str values representing a pair: [user input, bot response].
  • \n
\n\n

Your function should return a single string response, which is the bot's response to the particular user input message.

\n\n

Other than that, gr.ChatInterface has no required parameters (though several are available for customization of the UI).

\n\n

Here's a toy example:

\n\n
import random\nimport gradio as gr\n\ndef random_response(message, history):\n    return random.choice([\"Yes\", \"No\"])\n\ndemo = gr.ChatInterface(random_response)\n\ndemo.launch()\n\n
\n\n
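
As a slightly fuller sketch (not part of the original guide; the bot and its reply format are purely illustrative), here's a hypothetical chatbot that also makes use of the history argument described above:

\n\n
import gradio as gr\n\ndef echo_with_count(message, history):\n    # history is a list of [user_message, bot_response] pairs\n    return f\"You said: {message} (message #{len(history) + 1} in this chat)\"\n\ndemo = gr.ChatInterface(echo_with_count)\n\ndemo.launch()\n
\n\n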

\n\n

You can read more about gr.ChatInterface here.

\n\n

Blocks: More Flexibility and Control

\n\n

Gradio offers two approaches to build apps:

\n\n

1. Interface and ChatInterface, which provide a high-level abstraction for creating demos that we've been discussing so far.

\n\n

2. Blocks, a low-level API for designing web apps with more flexible layouts and data flows. Blocks allows you to do things like feature multiple data flows and demos, control where components appear on the page, handle complex data flows (e.g. outputs can serve as inputs to other functions), and update properties/visibility of components based on user interaction \u2014 still all in Python. If this customizability is what you need, try Blocks instead!

\n\n

Hello, Blocks

\n\n

Let's take a look at a simple example. Note how the API here differs from Interface.

\n\n
import gradio as gr\n\ndef greet(name):\n    return \"Hello \" + name + \"!\"\n\nwith gr.Blocks() as demo:\n    name = gr.Textbox(label=\"Name\")\n    output = gr.Textbox(label=\"Output Box\")\n    greet_btn = gr.Button(\"Greet\")\n    greet_btn.click(fn=greet, inputs=name, outputs=output, api_name=\"greet\")\n\n\ndemo.launch()\n
\n\n

\n\n

Things to note:

\n\n
    \n
  • Blocks are made with a with clause, and any component created inside this clause is automatically added to the app.
  • \n
  • Components appear vertically in the app in the order they are created. (Later we will cover customizing layouts!)
  • \n
  • A Button was created, and then a click event-listener was added to this button. The API for this should look familiar! Like an Interface, the click method takes a Python function, input components, and output components.
  • \n
\n\n

More Complexity

\n\n

Here's an app to give you a taste of what's possible with Blocks:

\n\n
import numpy as np\nimport gradio as gr\n\n\ndef flip_text(x):\n    return x[::-1]\n\n\ndef flip_image(x):\n    return np.fliplr(x)\n\n\nwith gr.Blocks() as demo:\n    gr.Markdown(\"Flip text or image files using this demo.\")\n    with gr.Tab(\"Flip Text\"):\n        text_input = gr.Textbox()\n        text_output = gr.Textbox()\n        text_button = gr.Button(\"Flip\")\n    with gr.Tab(\"Flip Image\"):\n        with gr.Row():\n            image_input = gr.Image()\n            image_output = gr.Image()\n        image_button = gr.Button(\"Flip\")\n\n    with gr.Accordion(\"Open for More!\"):\n        gr.Markdown(\"Look at me...\")\n\n    text_button.click(flip_text, inputs=text_input, outputs=text_output)\n    image_button.click(flip_image, inputs=image_input, outputs=image_output)\n\ndemo.launch()\n\n
\n\n

\n\n

A lot more going on here! We'll cover how to create complex Blocks apps like this in the building with blocks section.

\n\n

Congrats, you're now familiar with the basics of Gradio! \ud83e\udd73 Go to our next guide to learn more about the key features of Gradio.

\n", - "tags": [], - "spaces": [], - "url": "/guides/quickstart/", - "contributor": null - }, - { - "name": "key-features", - "category": "getting-started", - "pretty_category": "Getting Started", - "guide_index": 2, - "absolute_index": 1, - "pretty_name": "Key Features", - "content": "# Key Features\n\nLet's go through some of the most popular features of Gradio! Here are Gradio's key features:\n\n1. [Adding example inputs](#example-inputs)\n2. [Passing custom error messages](#alerts)\n3. [Adding descriptive content](#descriptive-content)\n4. [Setting up flagging](#flagging)\n5. [Preprocessing and postprocessing](#preprocessing-and-postprocessing)\n6. [Styling demos](#styling)\n7. [Queuing users](#queuing)\n8. [Iterative outputs](#iterative-outputs)\n9. [Progress bars](#progress-bars)\n10. [Batch functions](#batch-functions)\n11. [Running on collaborative notebooks](#colab-notebooks)\n\n## Example Inputs\n\nYou can provide example data that a user can easily load into `Interface`. This can be helpful to demonstrate the types of inputs the model expects, as well as to provide a way to explore your dataset in conjunction with your model. To load example data, you can provide a **nested list** to the `examples=` keyword argument of the Interface constructor. Each sublist within the outer list represents a data sample, and each element within the sublist represents an input for each input component. The format of example data for each component is specified in the [Docs](https://gradio.app/docs#components).\n\n```python\nimport gradio as gr\n\ndef calculator(num1, operation, num2):\n if operation == \"add\":\n return num1 + num2\n elif operation == \"subtract\":\n return num1 - num2\n elif operation == \"multiply\":\n return num1 * num2\n elif operation == \"divide\":\n if num2 == 0:\n raise gr.Error(\"Cannot divide by zero!\")\n return num1 / num2\n\ndemo = gr.Interface(\n calculator,\n [\n \"number\", \n gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n \"number\"\n ],\n \"number\",\n examples=[\n [5, \"add\", 3],\n [4, \"divide\", 2],\n [-4, \"multiply\", 2.5],\n [0, \"subtract\", 1.2],\n ],\n title=\"Toy Calculator\",\n description=\"Here's a sample toy calculator. Allows you to calculate things like $2+2=4$\",\n)\ndemo.launch()\n\n```\n\n\nYou can load a large dataset into the examples to browse and interact with the dataset through Gradio. The examples will be automatically paginated (you can configure this through the `examples_per_page` argument of `Interface`).\n\nContinue learning about examples in the [More On Examples](https://gradio.app/more-on-examples) guide.\n\n## Alerts\n\nYou wish to pass custom error messages to the user. To do so, raise a `gr.Error(\"custom message\")` to display an error message. If you try to divide by zero in the calculator demo above, a popup modal will display the custom error message. Learn more about Error in the [docs](https://gradio.app/docs#error). \n\nYou can also issue `gr.Warning(\"message\")` and `gr.Info(\"message\")` by having them as standalone lines in your function, which will immediately display modals while continuing the execution of your function. Queueing needs to be enabled for this to work. 
\n\nNote below how the `gr.Error` has to be raised, while the `gr.Warning` and `gr.Info` are single lines.\n\n```python\ndef start_process(name):\n gr.Info(\"Starting process\")\n if name is None:\n gr.Warning(\"Name is empty\")\n ...\n if success == False:\n raise gr.Error(\"Process failed\")\n```\n \n## Descriptive Content\n\nIn the previous example, you may have noticed the `title=` and `description=` keyword arguments in the `Interface` constructor that helps users understand your app.\n\nThere are three arguments in the `Interface` constructor to specify where this content should go:\n\n* `title`: which accepts text and can display it at the very top of interface, and also becomes the page title.\n* `description`: which accepts text, markdown or HTML and places it right under the title.\n* `article`: which also accepts text, markdown or HTML and places it below the interface.\n\n![annotated](https://github.com/gradio-app/gradio/blob/main/guides/assets/annotated.png?raw=true)\n\nIf you're using the `Blocks` API instead, you can insert text, markdown, or HTML anywhere using the `gr.Markdown(...)` or `gr.HTML(...)` components, with descriptive content inside the `Component` constructor.\n\nAnother useful keyword argument is `label=`, which is present in every `Component`. This modifies the label text at the top of each `Component`. You can also add the `info=` keyword argument to form elements like `Textbox` or `Radio` to provide further information on their usage.\n\n```python\ngr.Number(label='Age', info='In years, must be greater than 0')\n```\n\n## Flagging\n\nBy default, an `Interface` will have \"Flag\" button. When a user testing your `Interface` sees input with interesting output, such as erroneous or unexpected model behaviour, they can flag the input for you to review. Within the directory provided by the `flagging_dir=` argument to the `Interface` constructor, a CSV file will log the flagged inputs. If the interface involves file data, such as for Image and Audio components, folders will be created to store those flagged data as well.\n\nFor example, with the calculator interface shown above, we would have the flagged data stored in the flagged directory shown below:\n\n```directory\n+-- calculator.py\n+-- flagged/\n| +-- logs.csv\n```\n\n*flagged/logs.csv*\n\n```csv\nnum1,operation,num2,Output\n5,add,7,12\n6,subtract,1.5,4.5\n```\n\nWith the sepia interface shown earlier, we would have the flagged data stored in the flagged directory shown below:\n\n```directory\n+-- sepia.py\n+-- flagged/\n| +-- logs.csv\n| +-- im/\n| | +-- 0.png\n| | +-- 1.png\n| +-- Output/\n| | +-- 0.png\n| | +-- 1.png\n```\n\n*flagged/logs.csv*\n\n```csv\nim,Output\nim/0.png,Output/0.png\nim/1.png,Output/1.png\n```\n\nIf you wish for the user to provide a reason for flagging, you can pass a list of strings to the `flagging_options` argument of Interface. Users will have to select one of the strings when flagging, which will be saved as an additional column to the CSV.\n\n## Preprocessing and Postprocessing\n\n![](https://github.com/gradio-app/gradio/blob/main/js/_website/src/assets/img/dataflow.svg?raw=true)\n\nAs you've seen, Gradio includes components that can handle a variety of different data types, such as images, audio, and video. 
Most components can be used both as inputs or outputs.\n\nWhen a component is used as an input, Gradio automatically handles the *preprocessing* needed to convert the data from a type sent by the user's browser (such as a base64 representation of a webcam snapshot) to a form that can be accepted by your function (such as a `numpy` array).\n\nSimilarly, when a component is used as an output, Gradio automatically handles the *postprocessing* needed to convert the data from what is returned by your function (such as a list of image paths) to a form that can be displayed in the user's browser (such as a `Gallery` of images in base64 format).\n\nYou can control the *preprocessing* using the parameters when constructing the image component. For example, here if you instantiate the `Image` component with the following parameters, it will convert the image to the `PIL` type and reshape it to be `(100, 100)` no matter the original size that it was submitted as:\n\n```py\nimg = gr.Image(shape=(100, 100), type=\"pil\")\n```\n\nIn contrast, here we keep the original size of the image, but invert the colors before converting it to a numpy array:\n\n```py\nimg = gr.Image(invert_colors=True, type=\"numpy\")\n```\n\nPostprocessing is a lot easier! Gradio automatically recognizes the format of the returned data (e.g. is the `Image` a `numpy` array or a `str` filepath?) and postprocesses it into a format that can be displayed by the browser.\n\nTake a look at the [Docs](https://gradio.app/docs) to see all the preprocessing-related parameters for each Component.\n\n## Styling\n\nGradio themes are the easiest way to customize the look and feel of your app. You can choose from a variety of themes, or create your own. To do so, pass the `theme=` kwarg to the `Interface` constructor. For example:\n\n```python\ndemo = gr.Interface(..., theme=gr.themes.Monochrome())\n```\n\nGradio comes with a set of prebuilt themes which you can load from `gr.themes.*`. You can extend these themes or create your own themes from scratch - see the [Theming guide](https://gradio.app/guides/theming-guide) for more details.\n\nFor additional styling ability, you can pass any CSS to your app using the `css=` kwarg.\nThe base class for the Gradio app is `gradio-container`, so here's an example that changes the background color of the Gradio app:\n\n```python\nwith gr.Interface(css=\".gradio-container {background-color: red}\") as demo:\n ...\n```\n\nSome components can be additionally styled through the `style()` method. For example:\n\n```python\nimg = gr.Image(\"lion.jpg\").style(height='24', rounded=False)\n```\n\nTake a look at the [Docs](https://gradio.app/docs) to see all the styling options for each Component.\n\n## Queuing\n\nIf your app expects heavy traffic, use the `queue()` method to control processing rate. This will queue up calls so only a certain number of requests are processed at a single time. 
Queueing uses websockets, which also prevent network timeouts, so you should use queueing if the inference time of your function is long (> 1min).\n\nWith `Interface`:\n\n```python\ndemo = gr.Interface(...).queue()\ndemo.launch()\n```\n\nWith `Blocks`:\n\n```python\nwith gr.Blocks() as demo:\n #...\ndemo.queue()\ndemo.launch()\n```\n\nYou can control the number of requests processed at a single time as such:\n\n```python\ndemo.queue(concurrency_count=3)\n```\n\nSee the [Docs on queueing](/docs/#queue) on configuring other queuing parameters.\n\nTo specify only certain functions for queueing in Blocks:\n\n```python\nwith gr.Blocks() as demo2:\n num1 = gr.Number()\n num2 = gr.Number()\n output = gr.Number()\n gr.Button(\"Add\").click(\n lambda a, b: a + b, [num1, num2], output)\n gr.Button(\"Multiply\").click(\n lambda a, b: a * b, [num1, num2], output, queue=True)\ndemo2.launch()\n```\n\n## Iterative Outputs\n\nIn some cases, you may want to stream a sequence of outputs rather than show a single output at once. For example, you might have an image generation model and you want to show the image that is generated at each step, leading up to the final image. Or you might have a chatbot which streams its response one word at a time instead of returning it all at once.\n\nIn such cases, you can supply a **generator** function into Gradio instead of a regular function. Creating generators in Python is very simple: instead of a single `return` value, a function should `yield` a series of values instead. Usually the `yield` statement is put in some kind of loop. Here's an example of an generator that simply counts up to a given number:\n\n```python\ndef my_generator(x):\n for i in range(x):\n yield i\n```\n\nYou supply a generator into Gradio the same way as you would a regular function. For example, here's a a (fake) image generation model that generates noise for several steps before outputting an image:\n\n```python\nimport gradio as gr\nimport numpy as np\nimport time\n\n# define core fn, which returns a generator {steps} times before returning the image\ndef fake_diffusion(steps):\n for _ in range(steps):\n time.sleep(1)\n image = np.random.random((600, 600, 3))\n yield image\n image = \"https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg\"\n yield image\n\n\ndemo = gr.Interface(fake_diffusion, inputs=gr.Slider(1, 10, 3), outputs=\"image\")\n\n# define queue - required for generators\ndemo.queue()\n\ndemo.launch()\n\n```\n\n\nNote that we've added a `time.sleep(1)` in the iterator to create an artificial pause between steps so that you are able to observe the steps of the iterator (in a real image generation model, this probably wouldn't be necessary).\n\nSupplying a generator into Gradio **requires** you to enable queuing in the underlying Interface or Blocks (see the queuing section above).\n\n## Progress Bars\n\nGradio supports the ability to create a custom Progress Bars so that you have customizability and control over the progress update that you show to the user. In order to enable this, simply add an argument to your method that has a default value of a `gr.Progress` instance. Then you can update the progress levels by calling this instance directly with a float between 0 and 1, or using the `tqdm()` method of the `Progress` instance to track progress over an iterable, as shown below. 
Queueing must be enabled for progress updates.\n\n```python\nimport gradio as gr\nimport time\n\ndef slowly_reverse(word, progress=gr.Progress()):\n progress(0, desc=\"Starting\")\n time.sleep(1)\n progress(0.05)\n new_string = \"\"\n for letter in progress.tqdm(word, desc=\"Reversing\"):\n time.sleep(0.25)\n new_string = letter + new_string\n return new_string\n\ndemo = gr.Interface(slowly_reverse, gr.Text(), gr.Text())\n\nif __name__ == \"__main__\":\n demo.queue(concurrency_count=10).launch()\n\n```\n\n\nIf you use the `tqdm` library, you can even report progress updates automatically from any `tqdm.tqdm` that already exists within your function by setting the default argument as `gr.Progress(track_tqdm=True)`!\n\n## Batch Functions\n\nGradio supports the ability to pass *batch* functions. Batch functions are just\nfunctions which take in a list of inputs and return a list of predictions.\n\nFor example, here is a batched function that takes in two lists of inputs (a list of\nwords and a list of ints), and returns a list of trimmed words as output:\n\n```py\nimport time\n\ndef trim_words(words, lens):\n trimmed_words = []\n time.sleep(5)\n for w, l in zip(words, lens):\n trimmed_words.append(w[:int(l)]) \n return [trimmed_words]\n```\n\nThe advantage of using batched functions is that if you enable queuing, the Gradio\nserver can automatically *batch* incoming requests and process them in parallel,\npotentially speeding up your demo. Here's what the Gradio code looks like (notice\nthe `batch=True` and `max_batch_size=16` -- both of these parameters can be passed\ninto event triggers or into the `Interface` class)\n\nWith `Interface`:\n\n```python\ndemo = gr.Interface(trim_words, [\"textbox\", \"number\"], [\"output\"], \n batch=True, max_batch_size=16)\ndemo.queue()\ndemo.launch()\n```\n\nWith `Blocks`:\n\n```py\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n with gr.Row():\n word = gr.Textbox(label=\"word\")\n leng = gr.Number(label=\"leng\")\n output = gr.Textbox(label=\"Output\")\n with gr.Row():\n run = gr.Button()\n\n event = run.click(trim_words, [word, leng], output, batch=True, max_batch_size=16)\n\ndemo.queue()\ndemo.launch()\n```\n\nIn the example above, 16 requests could be processed in parallel (for a total inference\ntime of 5 seconds), instead of each request being processed separately (for a total\ninference time of 80 seconds). Many Hugging Face `transformers` and `diffusers` models\nwork very naturally with Gradio's batch mode: here's [an example demo using diffusers to\ngenerate images in batches](https://github.com/gradio-app/gradio/blob/main/demo/diffusers_with_batching/run.py)\n\nNote: using batch functions with Gradio **requires** you to enable queuing in the underlying Interface or Blocks (see the queuing section above).\n\n\n## Colab Notebooks\n\n\nGradio is able to run anywhere you run Python, including local jupyter notebooks as well as collaborative notebooks, such as [Google Colab](https://colab.research.google.com/). In the case of local jupyter notebooks and Google Colab notbooks, Gradio runs on a local server which you can interact with in your browser. (Note: for Google Colab, this is accomplished by [service worker tunneling](https://github.com/tensorflow/tensorboard/blob/master/docs/design/colab_integration.md), which requires cookies to be enabled in your browser.) 
For other remote notebooks, Gradio will also run on a server, but you will need to use [SSH tunneling](https://coderwall.com/p/ohk6cg/remote-access-to-ipython-notebooks-via-ssh) to view the app in your local browser. Often a simpler option is to use Gradio's built-in public links, [discussed in the next Guide](https://gradio.app/guides/sharing-your-app/#sharing-demos). ", "html": "

Key Features

\n\n

Let's go through some of the most popular features of Gradio! Here are Gradio's key features:

\n\n
    \n
  1. Adding example inputs
  2. Passing custom error messages
  3. Adding descriptive content
  4. Setting up flagging
  5. Preprocessing and postprocessing
  6. Styling demos
  7. Queuing users
  8. Iterative outputs
  9. Progress bars
  10. Batch functions
  11. Running on collaborative notebooks
\n\n

Example Inputs

\n\n

You can provide example data that a user can easily load into Interface. This can be helpful to demonstrate the types of inputs the model expects, as well as to provide a way to explore your dataset in conjunction with your model. To load example data, you can provide a nested list to the examples= keyword argument of the Interface constructor. Each sublist within the outer list represents a data sample, and each element within the sublist represents an input for each input component. The format of example data for each component is specified in the Docs.

\n\n
import gradio as gr\n\ndef calculator(num1, operation, num2):\n    if operation == \"add\":\n        return num1 + num2\n    elif operation == \"subtract\":\n        return num1 - num2\n    elif operation == \"multiply\":\n        return num1 * num2\n    elif operation == \"divide\":\n        if num2 == 0:\n            raise gr.Error(\"Cannot divide by zero!\")\n        return num1 / num2\n\ndemo = gr.Interface(\n    calculator,\n    [\n        \"number\", \n        gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n        \"number\"\n    ],\n    \"number\",\n    examples=[\n        [5, \"add\", 3],\n        [4, \"divide\", 2],\n        [-4, \"multiply\", 2.5],\n        [0, \"subtract\", 1.2],\n    ],\n    title=\"Toy Calculator\",\n    description=\"Here's a sample toy calculator. Allows you to calculate things like $2+2=4$\",\n)\ndemo.launch()\n\n
\n\n

\n\n

You can load a large dataset into the examples to browse and interact with the dataset through Gradio. The examples will be automatically paginated (you can configure this through the examples_per_page argument of Interface).

\n\n

Continue learning about examples in the More On Examples guide.

\n\n

Alerts

\n\n

You may wish to pass custom error messages to the user. To do so, raise a gr.Error(\"custom message\") to display an error message. If you try to divide by zero in the calculator demo above, a popup modal will display the custom error message. Learn more about Error in the docs.

\n\n

You can also issue gr.Warning(\"message\") and gr.Info(\"message\") by having them as standalone lines in your function, which will immediately display modals while continuing the execution of your function. Queueing needs to be enabled for this to work.

\n\n

Note below how the gr.Error has to be raised, while the gr.Warning and gr.Info are single lines.

\n\n
def start_process(name):\n    gr.Info(\"Starting process\")\n    if name is None:\n        gr.Warning(\"Name is empty\")\n    ...\n    if success == False:\n        raise gr.Error(\"Process failed\")\n
\n\n

Descriptive Content

\n\n

In the previous example, you may have noticed the title= and description= keyword arguments in the Interface constructor that help users understand your app.

\n\n

There are three arguments in the Interface constructor to specify where this content should go:

\n\n
    \n
  • title: which accepts text and can display it at the very top of the interface, and also becomes the page title.
  • \n
  • description: which accepts text, markdown or HTML and places it right under the title.
  • \n
  • article: which also accepts text, markdown or HTML and places it below the interface.
  • \n
\n\n

\"annotated\"

\n\n

If you're using the Blocks API instead, you can insert text, markdown, or HTML anywhere using the gr.Markdown(...) or gr.HTML(...) components, with descriptive content inside the Component constructor.

\n\n

Another useful keyword argument is label=, which is present in every Component. This modifies the label text at the top of each Component. You can also add the info= keyword argument to form elements like Textbox or Radio to provide further information on their usage.

\n\n
gr.Number(label='Age', info='In years, must be greater than 0')\n
\n\n

Flagging

\n\n

By default, an Interface will have a \"Flag\" button. When a user testing your Interface sees input with interesting output, such as erroneous or unexpected model behaviour, they can flag the input for you to review. Within the directory provided by the flagging_dir= argument to the Interface constructor, a CSV file will log the flagged inputs. If the interface involves file data, such as for Image and Audio components, folders will be created to store those flagged data as well.

\n\n

For example, with the calculator interface shown above, we would have the flagged data stored in the flagged directory shown below:

\n\n
+-- calculator.py\n+-- flagged/\n|   +-- logs.csv\n
\n\n

flagged/logs.csv

\n\n
num1,operation,num2,Output\n5,add,7,12\n6,subtract,1.5,4.5\n
\n\n

With the sepia interface shown earlier, we would have the flagged data stored in the flagged directory shown below:

\n\n
+-- sepia.py\n+-- flagged/\n|   +-- logs.csv\n|   +-- im/\n|   |   +-- 0.png\n|   |   +-- 1.png\n|   +-- Output/\n|   |   +-- 0.png\n|   |   +-- 1.png\n
\n\n

flagged/logs.csv

\n\n
im,Output\nim/0.png,Output/0.png\nim/1.png,Output/1.png\n
\n\n

If you wish for the user to provide a reason for flagging, you can pass a list of strings to the flagging_options argument of Interface. Users will have to select one of the strings when flagging, which will be saved as an additional column to the CSV.

\n\n
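
For instance, a minimal sketch (reusing a trimmed version of the calculator demo from above; the reason strings are purely illustrative) might look like this:

\n\n
import gradio as gr\n\ndef calculator(num1, operation, num2):\n    # trimmed version of the calculator shown earlier\n    return num1 + num2 if operation == \"add\" else num1 - num2\n\ndemo = gr.Interface(\n    calculator,\n    [\"number\", gr.Radio([\"add\", \"subtract\"]), \"number\"],\n    \"number\",\n    flagging_options=[\"incorrect\", \"ambiguous\", \"other\"],\n)\ndemo.launch()\n
\n\n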

Preprocessing and Postprocessing

\n\n

\"\"

\n\n

As you've seen, Gradio includes components that can handle a variety of different data types, such as images, audio, and video. Most components can be used as both inputs and outputs.

\n\n

When a component is used as an input, Gradio automatically handles the preprocessing needed to convert the data from a type sent by the user's browser (such as a base64 representation of a webcam snapshot) to a form that can be accepted by your function (such as a numpy array).

\n\n

Similarly, when a component is used as an output, Gradio automatically handles the postprocessing needed to convert the data from what is returned by your function (such as a list of image paths) to a form that can be displayed in the user's browser (such as a Gallery of images in base64 format).

\n\n

You can control the preprocessing using the parameters when constructing the image component. For example, if you instantiate the Image component with the following parameters, it will convert the image to the PIL type and reshape it to be (100, 100), no matter the original size at which it was submitted:

\n\n
img = gr.Image(shape=(100, 100), type=\"pil\")\n
\n\n

In contrast, here we keep the original size of the image, but invert the colors before converting it to a numpy array:

\n\n
img = gr.Image(invert_colors=True, type=\"numpy\")\n
\n\n

Postprocessing is a lot easier! Gradio automatically recognizes the format of the returned data (e.g. is the Image a numpy array or a str filepath?) and postprocesses it into a format that can be displayed by the browser.

\n\n
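
As a small illustrative sketch (the lion.jpg filepath is an assumption; any image file next to the script would do), the same \"image\" output component accepts either return type:

\n\n
import numpy as np\nimport gradio as gr\n\ndef get_image(use_filepath):\n    # The \"image\" output accepts either a numpy array or a str filepath;\n    # Gradio inspects the return value and postprocesses it accordingly.\n    if use_filepath:\n        return \"lion.jpg\"  # assumed to exist next to this script\n    return np.random.random((200, 200, 3))\n\ndemo = gr.Interface(get_image, \"checkbox\", \"image\")\ndemo.launch()\n
\n\n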

Take a look at the Docs to see all the preprocessing-related parameters for each Component.

\n\n

Styling

\n\n

Gradio themes are the easiest way to customize the look and feel of your app. You can choose from a variety of themes, or create your own. To do so, pass the theme= kwarg to the Interface constructor. For example:

\n\n
demo = gr.Interface(..., theme=gr.themes.Monochrome())\n
\n\n

Gradio comes with a set of prebuilt themes which you can load from gr.themes.*. You can extend these themes or create your own themes from scratch - see the Theming guide for more details.

\n\n

For additional styling ability, you can pass any CSS to your app using the css= kwarg.\nThe base class for the Gradio app is gradio-container, so here's an example that changes the background color of the Gradio app:

\n\n
with gr.Interface(css=\".gradio-container {background-color: red}\") as demo:\n    ...\n
\n\n

Some components can be additionally styled through the style() method. For example:

\n\n
img = gr.Image(\"lion.jpg\").style(height='24', rounded=False)\n
\n\n

Take a look at the Docs to see all the styling options for each Component.

\n\n

Queuing

\n\n

If your app expects heavy traffic, use the queue() method to control processing rate. This will queue up calls so only a certain number of requests are processed at a single time. Queueing uses websockets, which also prevent network timeouts, so you should use queueing if the inference time of your function is long (> 1min).

\n\n

With Interface:

\n\n
demo = gr.Interface(...).queue()\ndemo.launch()\n
\n\n

With Blocks:

\n\n
with gr.Blocks() as demo:\n    #...\ndemo.queue()\ndemo.launch()\n
\n\n

You can control the number of requests processed at a single time as such:

\n\n
demo.queue(concurrency_count=3)\n
\n\n

See the Docs on queueing for configuring other queuing parameters.

\n\n

To specify only certain functions for queueing in Blocks:

\n\n
with gr.Blocks() as demo2:\n    num1 = gr.Number()\n    num2 = gr.Number()\n    output = gr.Number()\n    gr.Button(\"Add\").click(\n        lambda a, b: a + b, [num1, num2], output)\n    gr.Button(\"Multiply\").click(\n        lambda a, b: a * b, [num1, num2], output, queue=True)\ndemo2.launch()\n
\n\n

Iterative Outputs

\n\n

In some cases, you may want to stream a sequence of outputs rather than show a single output at once. For example, you might have an image generation model and you want to show the image that is generated at each step, leading up to the final image. Or you might have a chatbot which streams its response one word at a time instead of returning it all at once.

\n\n

In such cases, you can supply a generator function into Gradio instead of a regular function. Creating generators in Python is very simple: instead of a single return value, a function should yield a series of values. Usually the yield statement is put in some kind of loop. Here's an example of a generator that simply counts up to a given number:

\n\n
def my_generator(x):\n    for i in range(x):\n        yield i\n
\n\n

You supply a generator into Gradio the same way as you would a regular function. For example, here's a (fake) image generation model that generates noise for several steps before outputting an image:

\n\n
import gradio as gr\nimport numpy as np\nimport time\n\n# define core fn, which returns a generator {steps} times before returning the image\ndef fake_diffusion(steps):\n    for _ in range(steps):\n        time.sleep(1)\n        image = np.random.random((600, 600, 3))\n        yield image\n    image = \"https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg\"\n    yield image\n\n\ndemo = gr.Interface(fake_diffusion, inputs=gr.Slider(1, 10, 3), outputs=\"image\")\n\n# define queue - required for generators\ndemo.queue()\n\ndemo.launch()\n\n
\n\n

\n\n

Note that we've added a time.sleep(1) in the iterator to create an artificial pause between steps so that you are able to observe the steps of the iterator (in a real image generation model, this probably wouldn't be necessary).

\n\n

Supplying a generator into Gradio requires you to enable queuing in the underlying Interface or Blocks (see the queuing section above).

\n\n

Progress Bars

\n\n

Gradio supports the ability to create custom Progress Bars so that you have customizability and control over the progress updates that you show to the user. In order to enable this, simply add an argument to your method that has a default value of a gr.Progress instance. Then you can update the progress levels by calling this instance directly with a float between 0 and 1, or using the tqdm() method of the Progress instance to track progress over an iterable, as shown below. Queueing must be enabled for progress updates.

\n\n
import gradio as gr\nimport time\n\ndef slowly_reverse(word, progress=gr.Progress()):\n    progress(0, desc=\"Starting\")\n    time.sleep(1)\n    progress(0.05)\n    new_string = \"\"\n    for letter in progress.tqdm(word, desc=\"Reversing\"):\n        time.sleep(0.25)\n        new_string = letter + new_string\n    return new_string\n\ndemo = gr.Interface(slowly_reverse, gr.Text(), gr.Text())\n\nif __name__ == \"__main__\":\n    demo.queue(concurrency_count=10).launch()\n\n
\n\n

\n\n

If you use the tqdm library, you can even report progress updates automatically from any tqdm.tqdm that already exists within your function by setting the default argument as gr.Progress(track_tqdm=True)!

\n\n
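
A minimal sketch of that pattern (the function name and the sleep call are just for illustration):

\n\n
import time\nimport gradio as gr\nfrom tqdm import tqdm\n\ndef reverse_with_tqdm(word, progress=gr.Progress(track_tqdm=True)):\n    new_string = \"\"\n    # This ordinary tqdm loop is reported to the Gradio progress bar automatically.\n    for letter in tqdm(word, desc=\"Reversing\"):\n        time.sleep(0.25)\n        new_string = letter + new_string\n    return new_string\n\ndemo = gr.Interface(reverse_with_tqdm, gr.Text(), gr.Text())\ndemo.queue().launch()\n
\n\n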

Batch Functions

\n\n

Gradio supports the ability to pass batch functions. Batch functions are just\nfunctions which take in a list of inputs and return a list of predictions.

\n\n

For example, here is a batched function that takes in two lists of inputs (a list of\nwords and a list of ints), and returns a list of trimmed words as output:

\n\n
import time\n\ndef trim_words(words, lens):\n    trimmed_words = []\n    time.sleep(5)\n    for w, l in zip(words, lens):\n        trimmed_words.append(w[:int(l)])        \n    return [trimmed_words]\n
\n\n

The advantage of using batched functions is that if you enable queuing, the Gradio\nserver can automatically batch incoming requests and process them in parallel,\npotentially speeding up your demo. Here's what the Gradio code looks like (notice\nthe batch=True and max_batch_size=16 -- both of these parameters can be passed\ninto event triggers or into the Interface class).

\n\n

With Interface:

\n\n
demo = gr.Interface(trim_words, [\"textbox\", \"number\"], [\"output\"], \n                    batch=True, max_batch_size=16)\ndemo.queue()\ndemo.launch()\n
\n\n

With Blocks:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        word = gr.Textbox(label=\"word\")\n        leng = gr.Number(label=\"leng\")\n        output = gr.Textbox(label=\"Output\")\n    with gr.Row():\n        run = gr.Button()\n\n    event = run.click(trim_words, [word, leng], output, batch=True, max_batch_size=16)\n\ndemo.queue()\ndemo.launch()\n
\n\n

In the example above, 16 requests could be processed in parallel (for a total inference\ntime of 5 seconds), instead of each request being processed separately (for a total\ninference time of 80 seconds). Many Hugging Face transformers and diffusers models\nwork very naturally with Gradio's batch mode: here's an example demo using diffusers to\ngenerate images in batches

\n\n

Note: using batch functions with Gradio requires you to enable queuing in the underlying Interface or Blocks (see the queuing section above).

\n\n

Colab Notebooks

\n\n

Gradio is able to run anywhere you run Python, including local Jupyter notebooks as well as collaborative notebooks, such as Google Colab. In the case of local Jupyter notebooks and Google Colab notebooks, Gradio runs on a local server which you can interact with in your browser. (Note: for Google Colab, this is accomplished by service worker tunneling, which requires cookies to be enabled in your browser.) For other remote notebooks, Gradio will also run on a server, but you will need to use SSH tunneling to view the app in your local browser. Often a simpler option is to use Gradio's built-in public links, discussed in the next Guide.

\n", - "tags": [], - "spaces": [], - "url": "/guides/key-features/", - "contributor": null - }, - { - "name": "sharing-your-app", - "category": "getting-started", - "pretty_category": "Getting Started", - "guide_index": 3, - "absolute_index": 2, - "pretty_name": "Sharing Your App", - "content": "# Sharing Your App\n\nHow to share your Gradio app: \n\n1. [Sharing demos with the share parameter](#sharing-demos)\n2. [Hosting on HF Spaces](#hosting-on-hf-spaces)\n3. [Embedding hosted spaces](#embedding-hosted-spaces)\n4. [Embedding with web components](#embedding-with-web-components)\n5. [Using the API page](#api-page)\n6. [Adding authentication to the page](#authentication)\n7. [Accessing Network Requests](#accessing-the-network-request-directly)\n8. [Mounting within FastAPI](#mounting-within-another-fast-api-app)\n9. [Security](#security-and-file-access)\n\n## Sharing Demos\n\nGradio demos can be easily shared publicly by setting `share=True` in the `launch()` method. Like this:\n\n```python\ndemo.launch(share=True)\n```\n\nThis generates a public, shareable link that you can send to anybody! When you send this link, the user on the other side can try out the model in their browser. Because the processing happens on your device (as long as your device stays on!), you don't have to worry about any packaging any dependencies. A share link usually looks something like this: **XXXXX.gradio.app**. Although the link is served through a Gradio URL, we are only a proxy for your local server, and do not store any data sent through your app.\n\nKeep in mind, however, that these links are publicly accessible, meaning that anyone can use your model for prediction! Therefore, make sure not to expose any sensitive information through the functions you write, or allow any critical changes to occur on your device. If you set `share=False` (the default, except in colab notebooks), only a local link is created, which can be shared by [port-forwarding](https://www.ssh.com/ssh/tunneling/example) with specific users. \n\n![sharing](https://github.com/gradio-app/gradio/blob/main/guides/assets/sharing.svg?raw=true)\n\nShare links expire after 72 hours.\n\n## Hosting on HF Spaces\n\nIf you'd like to have a permanent link to your Gradio demo on the internet, use Hugging Face Spaces. [Hugging Face Spaces](http://huggingface.co/spaces/) provides the infrastructure to permanently host your machine learning model for free! \n\nAfter you have [created a free Hugging Face account](https://huggingface.co/join), you have three methods to deploy your Gradio app to Hugging Face Spaces:\n\n1. From terminal: run `gradio deploy` in your app directory. The CLI will gather some basic metadata and then launch your app. To update your space, you can re-run this command or enable the Github Actions option to automatically update the Spaces on `git push`.\n\n2. From your browser: Drag and drop a folder containing your Gradio model and all related files [here](https://huggingface.co/new-space).\n\n3. Connect Spaces with your Git repository and Spaces will pull the Gradio app from there. See [this guide how to host on Hugging Face Spaces](https://huggingface.co/blog/gradio-spaces) for more information. \n\n\n\nNote: Some components, like `gr.Image`, will display a \"Share\" button only on Spaces, so that users can share the generated output to the Discussions page of the Space easily. You can disable this with `show_share_button`, such as `gr.Image(show_share_button=False)`. 
\n\n![Image with show_share_button=True](https://github.com/gradio-app/gradio/blob/main/guides/assets/share_icon.png?raw=true)\n\n## Embedding Hosted Spaces\n\nOnce you have hosted your app on Hugging Face Spaces (or on your own server), you may want to embed the demo on a different website, such as your blog or your portfolio. Embedding an interactive demo allows people to try out the machine learning model that you have built, without needing to download or install anything \u2014 right in their browser! The best part is that you can embed interactive demos even in static websites, such as GitHub pages.\n\nThere are two ways to embed your Gradio demos. You can find quick links to both options directly on the Hugging Face Space page, in the \"Embed this Space\" dropdown option:\n\n![Embed this Space dropdown option](https://github.com/gradio-app/gradio/blob/main/guides/assets/embed_this_space.png?raw=true)\n\n### Embedding with Web Components\n\nWeb components typically offer a better experience to users than IFrames. Web components load lazily, meaning that they won't slow down the loading time of your website, and they automatically adjust their height based on the size of the Gradio app. \n\nTo embed with Web Components:\n\n1. Import the gradio JS library into into your site by adding the script below in your site (replace {GRADIO_VERSION} in the URL with the library version of Gradio you are using). \n\n```html\n\n```\n\n2. Add \n```html\n\n```\n\nelement where you want to place the app. Set the `src=` attribute to your Space's embed URL, which you can find in the \"Embed this Space\" button. For example:\n\n\n```html\n\n```\n\n\n\nYou can see examples of how web components look on the Gradio landing page.\n\nYou can also customize the appearance and behavior of your web component with attributes that you pass into the `` tag:\n\n* `src`: as we've seen, the `src` attributes links to the URL of the hosted Gradio demo that you would like to embed\n* `space`: an optional shorthand if your Gradio demo is hosted on Hugging Face Space. Accepts a `username/space_name` instead of a full URL. Example: `gradio/Echocardiogram-Segmentation`. If this attribute attribute is provided, then `src` does not need to be provided.\n* `control_page_title`: a boolean designating whether the html title of the page should be set to the title of the Gradio app (by default `\"false\"`)\n* `initial_height`: the initial height of the web component while it is loading the Gradio app, (by default `\"300px\"`). Note that the final height is set based on the size of the Gradio app.\n* `container`: whether to show the border frame and information about where the Space is hosted (by default `\"true\"`)\n* `info`: whether to show just the information about where the Space is hosted underneath the embedded app (by default `\"true\"`)\n* `autoscroll`: whether to autoscroll to the output when prediction has finished (by default `\"false\"`)\n* `eager`: whether to load the Gradio app as soon as the page loads (by default `\"false\"`)\n* `theme_mode`: whether to use the `dark`, `light`, or default `system` theme mode (by default `\"system\"`)\n\nHere's an example of how to use these attributes to create a Gradio app that does not lazy load and has an initial height of 0px. \n\n```html\n\n```\n\n_Note: While Gradio's CSS will never impact the embedding page, the embedding page can affect the style of the embedded Gradio app. 
Make sure that any CSS in the parent page isn't so general that it could also apply to the embedded Gradio app and cause the styling to break. Element selectors such as `header { ... }` and `footer { ... }` will be the most likely to cause issues._\n\n### Embedding with IFrames\n\nTo embed with IFrames instead (if you cannot add javascript to your website, for example), add this element:\n\n```html\n\n```\n\nAgain, you can find the `src=` attribute to your Space's embed URL, which you can find in the \"Embed this Space\" button.\n\nNote: if you use IFrames, you'll probably want to add a fixed `height` attribute and set `style=\"border:0;\"` to remove the boreder. In addition, if your app requires permissions such as access to the webcam or the microphone, you'll need to provide that as well using the `allow` attribute.\n\n## API Page\n\nYou can use almost any Gradio app as an API! In the footer of a Gradio app [like this one](https://huggingface.co/spaces/gradio/hello_world), you'll see a \"Use via API\" link. \n\n![Use via API](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/gradio-guides/api3.gif)\n\nThis is a page that lists the endpoints that can be used to query the Gradio app, via our supported clients: either [the Python client](https://gradio.app/guides/getting-started-with-the-python-client/), or [the JavaScript client](https://gradio.app/guides/getting-started-with-the-js-client/). For each endpoint, Gradio automatically generates the parameters and their types, as well as example inputs.\n\nThe endpoints are automatically created when you launch a Gradio `Interface`. If you are using Gradio `Blocks`, you can also set up a Gradio API page, though we recommend that you explicitly name each event listener, such as\n\n```python\nbtn.click(add, [num1, num2], output, api_name=\"addition\")\n```\n\nThis will add and document the endpoint `/api/addition/` to the automatically generated API page. Otherwise, your API endpoints will appear as \"unnamed\" endpoints. \n\n*Note*: For Gradio apps in which [queueing is enabled](https://gradio.app/guides/key-features#queuing), users can bypass the queue if they make a POST request to your API endpoint. To disable this behavior, set `api_open=False` in the `queue()` method. To disable the API page altogether, set `show_api=False` in `.launch()`.\n\n## Authentication\n\nYou may wish to put an authentication page in front of your app to limit who can open your app. With the `auth=` keyword argument in the `launch()` method, you can provide a tuple with a username and password, or a list of acceptable username/password tuples; Here's an example that provides password-based authentication for a single user named \"admin\":\n\n```python\ndemo.launch(auth=(\"admin\", \"pass1234\"))\n```\n\nFor more complex authentication handling, you can even pass a function that takes a username and password as arguments, and returns True to allow authentication, False otherwise. 
This can be used for, among other things, making requests to 3rd-party authentication services.\n\nHere's an example of a function that accepts any login where the username and password are the same:\n\n```python\ndef same_auth(username, password):\n return username == password\ndemo.launch(auth=same_auth)\n```\n\nFor authentication to work properly, third party cookies must be enabled in your browser.\nThis is not the case by default for Safari, Chrome Incognito Mode.\n\n## Accessing the Network Request Directly\n\nWhen a user makes a prediction to your app, you may need the underlying network request, in order to get the request headers (e.g. for advanced authentication), log the client's IP address, or for other reasons. Gradio supports this in a similar manner to FastAPI: simply add a function parameter whose type hint is `gr.Request` and Gradio will pass in the network request as that parameter. Here is an example:\n\n```python\nimport gradio as gr\n\ndef echo(name, request: gr.Request):\n if request:\n print(\"Request headers dictionary:\", request.headers)\n print(\"IP address:\", request.client.host)\n return name\n\nio = gr.Interface(echo, \"textbox\", \"textbox\").launch()\n```\n\nNote: if your function is called directly instead of through the UI (this happens, for \nexample, when examples are cached), then `request` will be `None`. You should handle\nthis case explicitly to ensure that your app does not throw any errors. That is why\nwe have the explicit check `if request`.\n\n## Mounting Within Another FastAPI App\n\nIn some cases, you might have an existing FastAPI app, and you'd like to add a path for a Gradio demo.\nYou can easily do this with `gradio.mount_gradio_app()`.\n\nHere's a complete example:\n\n```python\nfrom fastapi import FastAPI\nimport gradio as gr\n\nCUSTOM_PATH = \"/gradio\"\n\napp = FastAPI()\n\n\n@app.get(\"/\")\ndef read_main():\n return {\"message\": \"This is your main app\"}\n\n\nio = gr.Interface(lambda x: \"Hello, \" + x + \"!\", \"textbox\", \"textbox\")\napp = gr.mount_gradio_app(app, io, path=CUSTOM_PATH)\n\n\n# Run this from the terminal as you would normally start a FastAPI app: `uvicorn run:app`\n# and navigate to http://localhost:8000/gradio in your browser.\n\n```\n\nNote that this approach also allows you run your Gradio apps on custom paths (`http://localhost:8000/gradio` in the example above).\n\n## Security and File Access\n\nSharing your Gradio app with others (by hosting it on Spaces, on your own server, or through temporary share links) **exposes** certain files on the host machine to users of your Gradio app. \n\nIn particular, Gradio apps ALLOW users to access to three kinds of files:\n\n* **Files in the same directory (or a subdirectory) of where the Gradio script is launched from.** For example, if the path to your gradio scripts is `/home/usr/scripts/project/app.py` and you launch it from `/home/usr/scripts/project/`, then users of your shared Gradio app will be able to access any files inside `/home/usr/scripts/project/`. This is done so that you can easily reference these files in your Gradio app (e.g. for your app's `examples`).\n\n* **Temporary files created by Gradio.** These are files that are created by Gradio as part of running your prediction function. For example, if your prediction function returns a video file, then Gradio will save that video to a temporary file and then send the path to the temporary file to the front end. 
You can customize the location of temporary files created by Gradio by setting the environment variable `GRADIO_TEMP_DIR` to an absolute path, such as `/home/usr/scripts/project/temp/`.\n\n* **Files that you explicitly allow via the `allowed_paths` parameter in `launch()`**. This parameter allows you to pass in a list of additional directories or exact filepaths you'd like to allow users to have access to. (By default, this parameter is an empty list).\n\nGradio DOES NOT ALLOW access to:\n\n* **Dotfiles** (any files whose name begins with `'.'`) or any files that are contained in any directory whose name begins with `'.'`\n\n* **Files that you explicitly allow via the `blocked_paths` parameter in `launch()`**. You can pass in a list of additional directories or exact filepaths to the `blocked_paths` parameter in `launch()`. This parameter takes precedence over the files that Gradio exposes by default or by the `allowed_paths`.\n\n* **Any other paths on the host machine**. Users should NOT be able to access other arbitrary paths on the host. \n\nPlease make sure you are running the latest version of `gradio` for these security settings to apply. ", - "html": "

Sharing Your App

\n\n

How to share your Gradio app:

\n\n
    \n
  1. Sharing demos with the share parameter
  2. Hosting on HF Spaces
  3. Embedding hosted spaces
  4. Embedding with web components
  5. Using the API page
  6. Adding authentication to the page
  7. Accessing Network Requests
  8. Mounting within FastAPI
  9. Security
\n\n

Sharing Demos

\n\n

Gradio demos can be easily shared publicly by setting share=True in the launch() method. Like this:

\n\n
demo.launch(share=True)\n
\n\n

This generates a public, shareable link that you can send to anybody! When you send this link, the user on the other side can try out the model in their browser. Because the processing happens on your device (as long as your device stays on!), you don't have to worry about packaging any dependencies. A share link usually looks something like this: XXXXX.gradio.app. Although the link is served through a Gradio URL, we are only a proxy for your local server, and do not store any data sent through your app.

\n\n

Keep in mind, however, that these links are publicly accessible, meaning that anyone can use your model for prediction! Therefore, make sure not to expose any sensitive information through the functions you write, or allow any critical changes to occur on your device. If you set share=False (the default, except in colab notebooks), only a local link is created, which can be shared by port-forwarding with specific users.

\n\n

\"sharing\"

\n\n

Share links expire after 72 hours.

\n\n

Hosting on HF Spaces

\n\n

If you'd like to have a permanent link to your Gradio demo on the internet, use Hugging Face Spaces. Hugging Face Spaces provides the infrastructure to permanently host your machine learning model for free!

\n\n

After you have created a free Hugging Face account, you have three methods to deploy your Gradio app to Hugging Face Spaces:

\n\n
    \n
  1. From terminal: run gradio deploy in your app directory. The CLI will gather some basic metadata and then launch your app. To update your space, you can re-run this command or enable the GitHub Actions option to automatically update the Spaces on git push.

  2. From your browser: Drag and drop a folder containing your Gradio model and all related files here.

  3. Connect Spaces with your Git repository and Spaces will pull the Gradio app from there. See this guide on how to host on Hugging Face Spaces for more information.
\n\n

\n\n

Note: Some components, like gr.Image, will display a \"Share\" button only on Spaces, so that users can share the generated output to the Discussions page of the Space easily. You can disable this with show_share_button, such as gr.Image(show_share_button=False).

\n\n

\"Imagesharebutton=True\" />

\n\n

Embedding Hosted Spaces

\n\n

Once you have hosted your app on Hugging Face Spaces (or on your own server), you may want to embed the demo on a different website, such as your blog or your portfolio. Embedding an interactive demo allows people to try out the machine learning model that you have built, without needing to download or install anything \u2014 right in their browser! The best part is that you can embed interactive demos even in static websites, such as GitHub pages.

\n\n

There are two ways to embed your Gradio demos. You can find quick links to both options directly on the Hugging Face Space page, in the \"Embed this Space\" dropdown option:

\n\n

\"Embed

\n\n

Embedding with Web Components

\n\n

Web components typically offer a better experience to users than IFrames. Web components load lazily, meaning that they won't slow down the loading time of your website, and they automatically adjust their height based on the size of the Gradio app.

\n\n

To embed with Web Components:

\n\n
    \n
  1. Import the gradio JS library into your site by adding the script below (replace {GRADIO_VERSION} in the URL with the library version of Gradio you are using).
  2. \n
\n\n
\n
\n\n
    \n
  1. Add
  2. \n
\n\n
\n
\n\n

element where you want to place the app. Set the src= attribute to your Space's embed URL, which you can find in the \"Embed this Space\" button. For example:

\n\n
\n
\n\n\n\n

You can see examples of how web components look on the Gradio landing page.

\n\n

You can also customize the appearance and behavior of your web component with attributes that you pass into the <gradio-app> tag:

\n\n
    \n
  • src: as we've seen, the src attribute links to the URL of the hosted Gradio demo that you would like to embed
  • \n
  • space: an optional shorthand if your Gradio demo is hosted on Hugging Face Spaces. Accepts a username/space_name instead of a full URL. Example: gradio/Echocardiogram-Segmentation. If this attribute is provided, then src does not need to be provided.
  • \n
  • control_page_title: a boolean designating whether the html title of the page should be set to the title of the Gradio app (by default \"false\")
  • \n
  • initial_height: the initial height of the web component while it is loading the Gradio app, (by default \"300px\"). Note that the final height is set based on the size of the Gradio app.
  • \n
  • container: whether to show the border frame and information about where the Space is hosted (by default \"true\")
  • \n
  • info: whether to show just the information about where the Space is hosted underneath the embedded app (by default \"true\")
  • \n
  • autoscroll: whether to autoscroll to the output when prediction has finished (by default \"false\")
  • \n
  • eager: whether to load the Gradio app as soon as the page loads (by default \"false\")
  • \n
  • theme_mode: whether to use the dark, light, or default system theme mode (by default \"system\")
  • \n
\n\n

Here's an example of how to use these attributes to create a Gradio app that does not lazy load and has an initial height of 0px.

\n\n
\n
\n\n

Note: While Gradio's CSS will never impact the embedding page, the embedding page can affect the style of the embedded Gradio app. Make sure that any CSS in the parent page isn't so general that it could also apply to the embedded Gradio app and cause the styling to break. Element selectors such as header { ... } and footer { ... } will be the most likely to cause issues.

\n\n

Embedding with IFrames

\n\n

To embed with IFrames instead (if you cannot add javascript to your website, for example), add this element:

\n\n
\n
\n\n

Again, set the src= attribute to your Space's embed URL, which you can find in the \"Embed this Space\" button.

\n\n

Note: if you use IFrames, you'll probably want to add a fixed height attribute and set style=\"border:0;\" to remove the border. In addition, if your app requires permissions such as access to the webcam or the microphone, you'll need to provide that as well using the allow attribute.

\n\n

API Page

\n\n

You can use almost any Gradio app as an API! In the footer of a Gradio app like this one, you'll see a \"Use via API\" link.

\n\n

\"Use

\n\n

This is a page that lists the endpoints that can be used to query the Gradio app, via our supported clients: either the Python client, or the JavaScript client. For each endpoint, Gradio automatically generates the parameters and their types, as well as example inputs.

\n\n

The endpoints are automatically created when you launch a Gradio Interface. If you are using Gradio Blocks, you can also set up a Gradio API page, though we recommend that you explicitly name each event listener, such as

\n\n
btn.click(add, [num1, num2], output, api_name=\"addition\")\n
\n\n

This will add and document the endpoint /api/addition/ to the automatically generated API page. Otherwise, your API endpoints will appear as \"unnamed\" endpoints.

\n\n
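As a minimal sketch of querying such a named endpoint with the Python client (the Space id below is a hypothetical placeholder):

```python
from gradio_client import Client

client = Client("your-username/your-space")  # hypothetical Space id or app URL

# api_name matches the event listener named api_name="addition" above
result = client.predict(3, 5, api_name="/addition")
print(result)
```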

Note: For Gradio apps in which queueing is enabled, users can bypass the queue if they make a POST request to your API endpoint. To disable this behavior, set api_open=False in the queue() method. To disable the API page altogether, set show_api=False in .launch().

\n\n
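For example, a minimal sketch combining both settings (assuming your app object is named demo):

```python
# Keep the queue but prevent POST requests from bypassing it,
# and hide the "Use via API" page entirely.
demo.queue(api_open=False).launch(show_api=False)
```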

Authentication

\n\n

You may wish to put an authentication page in front of your app to limit who can open your app. With the auth= keyword argument in the launch() method, you can provide a tuple with a username and password, or a list of acceptable username/password tuples. Here's an example that provides password-based authentication for a single user named \"admin\":

\n\n
demo.launch(auth=(\"admin\", \"pass1234\"))\n
\n\n

For more complex authentication handling, you can even pass a function that takes a username and password as arguments, and returns True to allow authentication, False otherwise. This can be used for, among other things, making requests to 3rd-party authentication services.

\n\n

Here's an example of a function that accepts any login where the username and password are the same:

\n\n
def same_auth(username, password):\n    return username == password\ndemo.launch(auth=same_auth)\n
\n\n

For authentication to work properly, third party cookies must be enabled in your browser.\nThis is not the case by default for Safari or for Chrome in Incognito Mode.

\n\n

Accessing the Network Request Directly

\n\n

When a user makes a prediction to your app, you may need the underlying network request, in order to get the request headers (e.g. for advanced authentication), log the client's IP address, or for other reasons. Gradio supports this in a similar manner to FastAPI: simply add a function parameter whose type hint is gr.Request and Gradio will pass in the network request as that parameter. Here is an example:

\n\n
import gradio as gr\n\ndef echo(name, request: gr.Request):\n    if request:\n        print(\"Request headers dictionary:\", request.headers)\n        print(\"IP address:\", request.client.host)\n    return name\n\nio = gr.Interface(echo, \"textbox\", \"textbox\").launch()\n
\n\n

Note: if your function is called directly instead of through the UI (this happens, for \nexample, when examples are cached), then request will be None. You should handle\nthis case explicitly to ensure that your app does not throw any errors. That is why\nthe example above includes the explicit if request check.

\n\n

Mounting Within Another FastAPI App

\n\n

In some cases, you might have an existing FastAPI app, and you'd like to add a path for a Gradio demo.\nYou can easily do this with gradio.mount_gradio_app().

\n\n

Here's a complete example:

\n\n
from fastapi import FastAPI\nimport gradio as gr\n\nCUSTOM_PATH = \"/gradio\"\n\napp = FastAPI()\n\n\n@app.get(\"/\")\ndef read_main():\n    return {\"message\": \"This is your main app\"}\n\n\nio = gr.Interface(lambda x: \"Hello, \" + x + \"!\", \"textbox\", \"textbox\")\napp = gr.mount_gradio_app(app, io, path=CUSTOM_PATH)\n\n\n# Run this from the terminal as you would normally start a FastAPI app: `uvicorn run:app`\n# and navigate to http://localhost:8000/gradio in your browser.\n\n
\n\n

Note that this approach also allows you to run your Gradio apps on custom paths (http://localhost:8000/gradio in the example above).

\n\n

Security and File Access

\n\n

Sharing your Gradio app with others (by hosting it on Spaces, on your own server, or through temporary share links) exposes certain files on the host machine to users of your Gradio app.

\n\n

In particular, Gradio apps ALLOW users to access three kinds of files:

\n\n
    \n
  • Files in the same directory (or a subdirectory) of where the Gradio script is launched from. For example, if the path to your gradio script is /home/usr/scripts/project/app.py and you launch it from /home/usr/scripts/project/, then users of your shared Gradio app will be able to access any files inside /home/usr/scripts/project/. This is done so that you can easily reference these files in your Gradio app (e.g. for your app's examples).

  • \n
  • Temporary files created by Gradio. These are files that are created by Gradio as part of running your prediction function. For example, if your prediction function returns a video file, then Gradio will save that video to a temporary file and then send the path to the temporary file to the front end. You can customize the location of temporary files created by Gradio by setting the environment variable GRADIO_TEMP_DIR to an absolute path, such as /home/usr/scripts/project/temp/.

  • \n
  • Files that you explicitly allow via the allowed_paths parameter in launch(). This parameter allows you to pass in a list of additional directories or exact filepaths you'd like to allow users to have access to. (By default, this parameter is an empty list).

  • \n
\n\n

Gradio DOES NOT ALLOW access to:

\n\n
    \n
  • Dotfiles (any files whose name begins with '.') or any files that are contained in any directory whose name begins with '.'

  • \n
  • Files that you explicitly block via the blocked_paths parameter in launch(). You can pass in a list of additional directories or exact filepaths to the blocked_paths parameter in launch(). This parameter takes precedence over the files that Gradio exposes by default or via allowed_paths (see the sketch after this list).

  • \n
  • Any other paths on the host machine. Users should NOT be able to access other arbitrary paths on the host.

  • \n
\n\n
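To make these rules concrete, here is a minimal sketch of a launch call that combines them; the paths shown are hypothetical placeholders:

```python
import gradio as gr

demo = gr.Interface(lambda name: "Hello " + name + "!", "textbox", "textbox")

# GRADIO_TEMP_DIR would normally be set in the shell environment, e.g.
#   GRADIO_TEMP_DIR=/home/usr/scripts/project/temp python app.py
demo.launch(
    allowed_paths=["/home/usr/shared_assets"],            # extra files users may access
    blocked_paths=["/home/usr/scripts/project/secrets"],  # never exposed, even inside the app directory
)
```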

Please make sure you are running the latest version of gradio for these security settings to apply.

\n", - "tags": [], - "spaces": [], - "url": "/guides/sharing-your-app/", - "contributor": null - }, - { - "name": "interface-state", - "category": "building-interfaces", - "pretty_category": "Building Interfaces", - "guide_index": 1, - "absolute_index": 3, - "pretty_name": "Interface State", - "content": "# Interface State\n\nThis guide covers how State is handled in Gradio. Learn the difference between Global and Session states, and how to use both.\n\n## Global State\n\nYour function may use data that persists beyond a single function call. If the data is something accessible to all function calls and all users, you can create a variable outside the function call and access it inside the function. For example, you may load a large model outside the function and use it inside the function so that every function call does not need to reload the model. \n\n```python\nimport gradio as gr\n\nscores = []\n\ndef track_score(score):\n scores.append(score)\n top_scores = sorted(scores, reverse=True)[:3]\n return top_scores\n\ndemo = gr.Interface(\n track_score, \n gr.Number(label=\"Score\"), \n gr.JSON(label=\"Top Scores\")\n)\ndemo.launch()\n```\n\nIn the code above, the `scores` array is shared between all users. If multiple users are accessing this demo, their scores will all be added to the same list, and the returned top 3 scores will be collected from this shared reference. \n\n## Session State\n\nAnother type of data persistence Gradio supports is session **state**, where data persists across multiple submits within a page session. However, data is *not* shared between different users of your model. To store data in a session state, you need to do three things:\n\n1. Pass in an extra parameter into your function, which represents the state of the interface.\n2. At the end of the function, return the updated value of the state as an extra return value.\n3. Add the `'state'` input and `'state'` output components when creating your `Interface`\n\nA chatbot is an example where you would need session state - you want access to a users previous submissions, but you cannot store chat history in a global variable, because then chat history would get jumbled between different users. 
\n\n```python\nimport gradio as gr\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\nimport torch\n\ntokenizer = AutoTokenizer.from_pretrained(\"microsoft/DialoGPT-medium\")\nmodel = AutoModelForCausalLM.from_pretrained(\"microsoft/DialoGPT-medium\")\n\n\ndef user(message, history):\n return \"\", history + [[message, None]]\n\n\ndef bot(history):\n user_message = history[-1][0]\n new_user_input_ids = tokenizer.encode(\n user_message + tokenizer.eos_token, return_tensors=\"pt\"\n )\n\n # append the new user input tokens to the chat history\n bot_input_ids = torch.cat([torch.LongTensor([]), new_user_input_ids], dim=-1)\n\n # generate a response\n response = model.generate(\n bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id\n ).tolist()\n\n # convert the tokens to text, and then split the responses into lines\n response = tokenizer.decode(response[0]).split(\"<|endoftext|>\")\n response = [\n (response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)\n ] # convert to tuples of list\n history[-1] = response[0]\n return history\n\n\nwith gr.Blocks() as demo:\n chatbot = gr.Chatbot()\n msg = gr.Textbox()\n clear = gr.Button(\"Clear\")\n\n msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(\n bot, chatbot, chatbot\n )\n clear.click(lambda: None, None, chatbot, queue=False)\n\ndemo.launch()\n\n```\n\n\nNotice how the state persists across submits within each page, but if you load this demo in another tab (or refresh the page), the demos will not share chat history. \n\nThe default value of `state` is None. If you pass a default value to the state parameter of the function, it is used as the default value of the state instead. The `Interface` class only supports a single input and outputs state variable, though it can be a list with multiple elements. For more complex use cases, you can use Blocks, [which supports multiple `State` variables](/guides/state-in-blocks/).", - "html": "

Interface State

\n\n

This guide covers how State is handled in Gradio. Learn the difference between Global and Session states, and how to use both.

\n\n

Global State

\n\n

Your function may use data that persists beyond a single function call. If the data is something accessible to all function calls and all users, you can create a variable outside the function call and access it inside the function. For example, you may load a large model outside the function and use it inside the function so that every function call does not need to reload the model.

\n\n
import gradio as gr\n\nscores = []\n\ndef track_score(score):\n    scores.append(score)\n    top_scores = sorted(scores, reverse=True)[:3]\n    return top_scores\n\ndemo = gr.Interface(\n    track_score, \n    gr.Number(label=\"Score\"), \n    gr.JSON(label=\"Top Scores\")\n)\ndemo.launch()\n
\n\n

In the code above, the scores array is shared between all users. If multiple users are accessing this demo, their scores will all be added to the same list, and the returned top 3 scores will be collected from this shared reference.

\n\n

Session State

\n\n

Another type of data persistence Gradio supports is session state, where data persists across multiple submits within a page session. However, data is not shared between different users of your model. To store data in a session state, you need to do three things:

\n\n
    \n
  1. Pass in an extra parameter into your function, which represents the state of the interface.
  2. \n
  3. At the end of the function, return the updated value of the state as an extra return value.
  4. \n
  5. Add the 'state' input and 'state' output components when creating your Interface
  6. \n
\n\n

A chatbot is an example where you would need session state - you want access to a user's previous submissions, but you cannot store chat history in a global variable, because then chat history would get jumbled between different users.

\n\n
import gradio as gr\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\nimport torch\n\ntokenizer = AutoTokenizer.from_pretrained(\"microsoft/DialoGPT-medium\")\nmodel = AutoModelForCausalLM.from_pretrained(\"microsoft/DialoGPT-medium\")\n\n\ndef user(message, history):\n    return \"\", history + [[message, None]]\n\n\ndef bot(history):\n    user_message = history[-1][0]\n    new_user_input_ids = tokenizer.encode(\n        user_message + tokenizer.eos_token, return_tensors=\"pt\"\n    )\n\n    # append the new user input tokens to the chat history\n    bot_input_ids = torch.cat([torch.LongTensor([]), new_user_input_ids], dim=-1)\n\n    # generate a response\n    response = model.generate(\n        bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id\n    ).tolist()\n\n    # convert the tokens to text, and then split the responses into lines\n    response = tokenizer.decode(response[0]).split(\"<|endoftext|>\")\n    response = [\n        (response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)\n    ]  # convert to tuples of list\n    history[-1] = response[0]\n    return history\n\n\nwith gr.Blocks() as demo:\n    chatbot = gr.Chatbot()\n    msg = gr.Textbox()\n    clear = gr.Button(\"Clear\")\n\n    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(\n        bot, chatbot, chatbot\n    )\n    clear.click(lambda: None, None, chatbot, queue=False)\n\ndemo.launch()\n\n
\n\n

\n\n

Notice how the state persists across submits within each page, but if you load this demo in another tab (or refresh the page), the demos will not share chat history.

\n\n

The default value of state is None. If you pass a default value to the state parameter of the function, it is used as the default value of the state instead. The Interface class only supports a single input state variable and a single output state variable, though each can be a list with multiple elements. For more complex use cases, you can use Blocks, which supports multiple State variables.
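As a minimal sketch of the 'state' input/output mechanism described above (the function and component choices here are illustrative, not part of the guide):

```python
import gradio as gr

# The extra `history` parameter receives the session state; its default
# value ([]) is used as the initial state instead of None. The default
# list is never mutated, only copied.
def track_messages(message, history=[]):
    history = history + [message]
    return history, history  # first value is the visible output, second updates the state

demo = gr.Interface(
    track_messages,
    ["textbox", "state"],
    [gr.JSON(label="Messages this session"), "state"],
)
demo.launch()
```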

\n", - "tags": [], - "spaces": [], - "url": "/guides/interface-state/", - "contributor": null - }, - { - "name": "reactive-interfaces", - "category": "building-interfaces", - "pretty_category": "Building Interfaces", - "guide_index": 2, - "absolute_index": 4, - "pretty_name": "Reactive Interfaces", - "content": "# Reactive Interfaces\n\nThis guide covers how to get Gradio interfaces to refresh automatically or continuously stream data.\n\n## Live Interfaces\n\nYou can make interfaces automatically refresh by setting `live=True` in the interface. Now the interface will recalculate as soon as the user input changes.\n\n```python\nimport gradio as gr\n\ndef calculator(num1, operation, num2):\n if operation == \"add\":\n return num1 + num2\n elif operation == \"subtract\":\n return num1 - num2\n elif operation == \"multiply\":\n return num1 * num2\n elif operation == \"divide\":\n return num1 / num2\n\ndemo = gr.Interface(\n calculator,\n [\n \"number\",\n gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n \"number\"\n ],\n \"number\",\n live=True,\n)\ndemo.launch()\n\n```\n\n\nNote there is no submit button, because the interface resubmits automatically on change.\n\n## Streaming Components\n\nSome components have a \"streaming\" mode, such as `Audio` component in microphone mode, or the `Image` component in webcam mode. Streaming means data is sent continuously to the backend and the `Interface` function is continuously being rerun. \n\nThe difference between `gr.Audio(source='microphone')` and `gr.Audio(source='microphone', streaming=True)`, when both are used in `gr.Interface(live=True)`, is that the first `Component` will automatically submit data and run the `Interface` function when the user stops recording, whereas the second `Component` will continuously send data and run the `Interface` function *during* recording.\n\nHere is example code of streaming images from the webcam.\n\n```python\nimport gradio as gr\nimport numpy as np\n\ndef flip(im):\n return np.flipud(im)\n\ndemo = gr.Interface(\n flip, \n gr.Image(source=\"webcam\", streaming=True), \n \"image\",\n live=True\n)\ndemo.launch()\n \n```", - "html": "

Reactive Interfaces

\n\n

This guide covers how to get Gradio interfaces to refresh automatically or continuously stream data.

\n\n

Live Interfaces

\n\n

You can make interfaces automatically refresh by setting live=True in the interface. Now the interface will recalculate as soon as the user input changes.

\n\n
import gradio as gr\n\ndef calculator(num1, operation, num2):\n    if operation == \"add\":\n        return num1 + num2\n    elif operation == \"subtract\":\n        return num1 - num2\n    elif operation == \"multiply\":\n        return num1 * num2\n    elif operation == \"divide\":\n        return num1 / num2\n\ndemo = gr.Interface(\n    calculator,\n    [\n        \"number\",\n        gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n        \"number\"\n    ],\n    \"number\",\n    live=True,\n)\ndemo.launch()\n\n
\n\n

\n\n

Note there is no submit button, because the interface resubmits automatically on change.

\n\n

Streaming Components

\n\n

Some components have a \"streaming\" mode, such as the Audio component in microphone mode, or the Image component in webcam mode. Streaming means data is sent continuously to the backend and the Interface function is continuously being rerun.

\n\n

The difference between gr.Audio(source='microphone') and gr.Audio(source='microphone', streaming=True), when both are used in gr.Interface(live=True), is that the first Component will automatically submit data and run the Interface function when the user stops recording, whereas the second Component will continuously send data and run the Interface function during recording.

\n\n

Here is example code of streaming images from the webcam.

\n\n
import gradio as gr\nimport numpy as np\n\ndef flip(im):\n    return np.flipud(im)\n\ndemo = gr.Interface(\n    flip, \n    gr.Image(source=\"webcam\", streaming=True), \n    \"image\",\n    live=True\n)\ndemo.launch()\n\n
\n", - "tags": [], - "spaces": [], - "url": "/guides/reactive-interfaces/", - "contributor": null - }, - { - "name": "advanced-interface-features", - "category": "building-interfaces", - "pretty_category": "Building Interfaces", - "guide_index": 4, - "absolute_index": 6, - "pretty_name": "Advanced Interface Features", - "content": "# Advanced Interface Features\n\nThere's more to cover on the [Interface](https://gradio.app/docs#interface) class. This guide covers all the advanced features: Using [Interpretation](https://gradio.app/docs#interpretation), custom styling, loading from the [Hugging Face Hub](https://hf.co), and using [Parallel](https://gradio.app/docs#parallel) and [Series](https://gradio.app/docs#series). \n\n## Interpreting your Predictions\n\nMost models are black boxes such that the internal logic of the function is hidden from the end user. To encourage transparency, we've made it very easy to add interpretation to your model by simply setting the `interpretation` keyword in the `Interface` class to `default`. This allows your users to understand what parts of the input are responsible for the output. Take a look at the simple interface below which shows an image classifier that also includes interpretation:\n\n```python\nimport requests\nimport tensorflow as tf\n\nimport gradio as gr\n\ninception_net = tf.keras.applications.MobileNetV2() # load the model\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\n\ndef classify_image(inp):\n inp = inp.reshape((-1, 224, 224, 3))\n inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)\n prediction = inception_net.predict(inp).flatten()\n return {labels[i]: float(prediction[i]) for i in range(1000)}\n\n\nimage = gr.Image(shape=(224, 224))\nlabel = gr.Label(num_top_classes=3)\n\ndemo = gr.Interface(\n fn=classify_image, inputs=image, outputs=label, interpretation=\"default\"\n)\n\ndemo.launch()\n\n```\n\n\nIn addition to `default`, Gradio also includes [Shapley-based interpretation](https://christophm.github.io/interpretable-ml-book/shap.html), which provides more accurate interpretations, albeit usually with a slower runtime. To use this, simply set the `interpretation` parameter to `\"shap\"` (note: also make sure the python package `shap` is installed). Optionally, you can modify the `num_shap` parameter, which controls the tradeoff between accuracy and runtime (increasing this value generally increases accuracy). Here is an example:\n\n```python\ngr.Interface(fn=classify_image,\n inputs=image, \n outputs=label, \n interpretation=\"shap\", \n num_shap=5).launch()\n```\n\nThis will work for any function, even if internally, the model is a complex neural network or some other black box. If you use Gradio's `default` or `shap` interpretation, the output component must be a `Label`. All common input components are supported. 
Here is an example with text input.\n\n```python\nimport gradio as gr\n\nmale_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n\n\ndef gender_of_sentence(sentence):\n male_count = len([word for word in sentence.split() if word.lower() in male_words])\n female_count = len(\n [word for word in sentence.split() if word.lower() in female_words]\n )\n total = max(male_count + female_count, 1)\n return {\"male\": male_count / total, \"female\": female_count / total}\n\n\ndemo = gr.Interface(\n fn=gender_of_sentence,\n inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n outputs=\"label\",\n interpretation=\"default\",\n)\n\ndemo.launch()\n\n```\n\nSo what is happening under the hood? With these interpretation methods, Gradio runs the prediction multiple times with modified versions of the input. Based on the results, you'll see that the interface automatically highlights the parts of the text (or image, etc.) that contributed increased the likelihood of the class as red. The intensity of color corresponds to the importance of that part of the input. The parts that decrease the class confidence are highlighted blue.\n\nYou can also write your own interpretation function. The demo below adds custom interpretation to the previous demo. This function will take the same inputs as the main wrapped function. The output of this interpretation function will be used to highlight the input of each input component - therefore the function must return a list where the number of elements corresponds to the number of input components. To see the format for interpretation for each input component, check the Docs.\n\n```python\nimport re\n\nimport gradio as gr\n\nmale_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n\n\ndef gender_of_sentence(sentence):\n male_count = len([word for word in sentence.split() if word.lower() in male_words])\n female_count = len(\n [word for word in sentence.split() if word.lower() in female_words]\n )\n total = max(male_count + female_count, 1)\n return {\"male\": male_count / total, \"female\": female_count / total}\n\n\n# Number of arguments to interpretation function must\n# match number of inputs to prediction function\ndef interpret_gender(sentence):\n result = gender_of_sentence(sentence)\n is_male = result[\"male\"] > result[\"female\"]\n interpretation = []\n for word in re.split(\"( )\", sentence):\n score = 0\n token = word.lower()\n if (is_male and token in male_words) or (not is_male and token in female_words):\n score = 1\n elif (is_male and token in female_words) or (\n not is_male and token in male_words\n ):\n score = -1\n interpretation.append((word, score))\n # Output must be a list of lists containing the same number of elements as inputs\n # Each element corresponds to the interpretation scores for the given input\n return [interpretation]\n\n\ndemo = gr.Interface(\n fn=gender_of_sentence,\n inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n outputs=\"label\",\n interpretation=interpret_gender,\n)\n\ndemo.launch()\n\n```\n\nLearn more about Interpretation in the [docs](https://gradio.app/docs#interpretation). 
\n\n## Custom Styling\n\nIf you'd like to have more fine-grained control over any aspect of your demo, you can also write your own css or pass in a filepath to a css file, with the `css` parameter of the `Interface` class.\n\n```python\ngr.Interface(..., css=\"body {background-color: red}\")\n```\n\nIf you'd like to reference external files in your css, preface the file path (which can be a relative or absolute path) with `\"file=\"`, for example:\n\n```python\ngr.Interface(..., css=\"body {background-image: url('file=clouds.jpg')}\")\n```\n\n**Warning**: Custom CSS is *not* guaranteed to work across Gradio versions as the Gradio HTML DOM may change. We recommend using custom CSS sparingly and instead using [Themes](/guides/theming-guide/) whenever possible. \n\n## Loading Hugging Face Models and Spaces\n\nGradio integrates nicely with the [Hugging Face Hub](https://hf.co), allowing you to load models and Spaces with just one line of code. To use this, simply use the `load()` method in the `Interface` class. So:\n\n- To load any model from the Hugging Face Hub and create an interface around it, you pass `\"model/\"` or `\"huggingface/\"` followed by the model name, like these examples:\n\n```python\ngr.Interface.load(\"huggingface/gpt2\").launch();\n```\n\n```python\ngr.Interface.load(\"huggingface/EleutherAI/gpt-j-6B\", \n inputs=gr.Textbox(lines=5, label=\"Input Text\") # customizes the input component\n).launch()\n```\n\n- To load any Space from the Hugging Face Hub and recreate it locally (so that you can customize the inputs and outputs for example), you pass `\"spaces/\"` followed by the model name:\n\n```python\ngr.Interface.load(\"spaces/eugenesiow/remove-bg\", \n inputs=\"webcam\", \n title=\"Remove your webcam background!\").launch()\n```\n\nOne of the great things about loading Hugging Face models or spaces using Gradio is that you can then immediately use the resulting `Interface` object just like function in your Python code (this works for every type of model/space: text, images, audio, video, and even multimodal models):\n\n```python\nio = gr.Interface.load(\"models/EleutherAI/gpt-neo-2.7B\")\nio(\"It was the best of times\") # outputs model completion\n```\n\n## Putting Interfaces in Parallel and Series\n\nGradio also lets you mix interfaces very easily using the `gradio.Parallel` and `gradio.Series` classes. `Parallel` lets you put two similar models (if they have the same input type) in parallel to compare model predictions:\n\n```python\ngenerator1 = gr.Interface.load(\"huggingface/gpt2\")\ngenerator2 = gr.Interface.load(\"huggingface/EleutherAI/gpt-neo-2.7B\")\ngenerator3 = gr.Interface.load(\"huggingface/EleutherAI/gpt-j-6B\")\n\ngr.Parallel(generator1, generator2, generator3).launch()\n```\n\n`Series` lets you put models and spaces in series, piping the output of one model into the input of the next model. \n\n```python\ngenerator = gr.Interface.load(\"huggingface/gpt2\")\ntranslator = gr.Interface.load(\"huggingface/t5-small\")\n\ngr.Series(generator, translator).launch() \n# this demo generates text, then translates it to German, and outputs the final result.\n```\n\nAnd of course, you can also mix `Parallel` and `Series` together whenever that makes sense!\n\nLearn more about Parallel and Series in the [docs](https://gradio.app/docs#parallel). ", - "html": "

Advanced Interface Features

\n\n

There's more to cover on the Interface class. This guide covers all the advanced features: Using Interpretation, custom styling, loading from the Hugging Face Hub, and using Parallel and Series.

\n\n

Interpreting your Predictions

\n\n

Most models are black boxes such that the internal logic of the function is hidden from the end user. To encourage transparency, we've made it very easy to add interpretation to your model by simply setting the interpretation keyword in the Interface class to default. This allows your users to understand what parts of the input are responsible for the output. Take a look at the simple interface below which shows an image classifier that also includes interpretation:

\n\n
import requests\nimport tensorflow as tf\n\nimport gradio as gr\n\ninception_net = tf.keras.applications.MobileNetV2()  # load the model\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\n\ndef classify_image(inp):\n    inp = inp.reshape((-1, 224, 224, 3))\n    inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)\n    prediction = inception_net.predict(inp).flatten()\n    return {labels[i]: float(prediction[i]) for i in range(1000)}\n\n\nimage = gr.Image(shape=(224, 224))\nlabel = gr.Label(num_top_classes=3)\n\ndemo = gr.Interface(\n    fn=classify_image, inputs=image, outputs=label, interpretation=\"default\"\n)\n\ndemo.launch()\n\n
\n\n

In addition to default, Gradio also includes Shapley-based interpretation, which provides more accurate interpretations, albeit usually with a slower runtime. To use this, simply set the interpretation parameter to \"shap\" (note: also make sure the python package shap is installed). Optionally, you can modify the num_shap parameter, which controls the tradeoff between accuracy and runtime (increasing this value generally increases accuracy). Here is an example:

\n\n
gr.Interface(fn=classify_image,\n            inputs=image, \n            outputs=label, \n            interpretation=\"shap\", \n            num_shap=5).launch()\n
\n\n

This will work for any function, even if internally, the model is a complex neural network or some other black box. If you use Gradio's default or shap interpretation, the output component must be a Label. All common input components are supported. Here is an example with text input.

\n\n
import gradio as gr\n\nmale_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n\n\ndef gender_of_sentence(sentence):\n    male_count = len([word for word in sentence.split() if word.lower() in male_words])\n    female_count = len(\n        [word for word in sentence.split() if word.lower() in female_words]\n    )\n    total = max(male_count + female_count, 1)\n    return {\"male\": male_count / total, \"female\": female_count / total}\n\n\ndemo = gr.Interface(\n    fn=gender_of_sentence,\n    inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n    outputs=\"label\",\n    interpretation=\"default\",\n)\n\ndemo.launch()\n\n
\n\n

So what is happening under the hood? With these interpretation methods, Gradio runs the prediction multiple times with modified versions of the input. Based on the results, you'll see that the interface automatically highlights, in red, the parts of the text (or image, etc.) that contributed to increasing the likelihood of the class. The intensity of color corresponds to the importance of that part of the input. The parts that decrease the class confidence are highlighted in blue.

\n\n

You can also write your own interpretation function. The demo below adds custom interpretation to the previous demo. This function will take the same inputs as the main wrapped function. The output of this interpretation function will be used to highlight the input of each input component - therefore the function must return a list where the number of elements corresponds to the number of input components. To see the format for interpretation for each input component, check the Docs.

\n\n
import re\n\nimport gradio as gr\n\nmale_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n\n\ndef gender_of_sentence(sentence):\n    male_count = len([word for word in sentence.split() if word.lower() in male_words])\n    female_count = len(\n        [word for word in sentence.split() if word.lower() in female_words]\n    )\n    total = max(male_count + female_count, 1)\n    return {\"male\": male_count / total, \"female\": female_count / total}\n\n\n# Number of arguments to interpretation function must\n# match number of inputs to prediction function\ndef interpret_gender(sentence):\n    result = gender_of_sentence(sentence)\n    is_male = result[\"male\"] > result[\"female\"]\n    interpretation = []\n    for word in re.split(\"( )\", sentence):\n        score = 0\n        token = word.lower()\n        if (is_male and token in male_words) or (not is_male and token in female_words):\n            score = 1\n        elif (is_male and token in female_words) or (\n            not is_male and token in male_words\n        ):\n            score = -1\n        interpretation.append((word, score))\n    # Output must be a list of lists containing the same number of elements as inputs\n    # Each element corresponds to the interpretation scores for the given input\n    return [interpretation]\n\n\ndemo = gr.Interface(\n    fn=gender_of_sentence,\n    inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n    outputs=\"label\",\n    interpretation=interpret_gender,\n)\n\ndemo.launch()\n\n
\n\n

Learn more about Interpretation in the docs.

\n\n

Custom Styling

\n\n

If you'd like to have more fine-grained control over any aspect of your demo, you can also write your own css or pass in a filepath to a css file, with the css parameter of the Interface class.

\n\n
gr.Interface(..., css=\"body {background-color: red}\")\n
\n\n

If you'd like to reference external files in your css, preface the file path (which can be a relative or absolute path) with \"file=\", for example:

\n\n
gr.Interface(..., css=\"body {background-image: url('file=clouds.jpg')}\")\n
\n\n

Warning: Custom CSS is not guaranteed to work across Gradio versions as the Gradio HTML DOM may change. We recommend using custom CSS sparingly and instead using Themes whenever possible.

\n\n
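For instance, a minimal sketch of opting into one of the built-in themes instead of overriding CSS:

```python
import gradio as gr

# Prefer a prebuilt theme over ad-hoc CSS overrides
demo = gr.Interface(lambda name: "Hello " + name + "!", "textbox", "textbox",
                    theme=gr.themes.Soft())
demo.launch()
```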

Loading Hugging Face Models and Spaces

\n\n

Gradio integrates nicely with the Hugging Face Hub, allowing you to load models and Spaces with just one line of code. To use this, simply use the load() method in the Interface class. So:

\n\n
    \n
  • To load any model from the Hugging Face Hub and create an interface around it, you pass \"model/\" or \"huggingface/\" followed by the model name, like these examples:
  • \n
\n\n
gr.Interface.load(\"huggingface/gpt2\").launch();\n
\n\n
gr.Interface.load(\"huggingface/EleutherAI/gpt-j-6B\", \n    inputs=gr.Textbox(lines=5, label=\"Input Text\")  # customizes the input component\n).launch()\n
\n\n
    \n
  • To load any Space from the Hugging Face Hub and recreate it locally (so that you can customize the inputs and outputs for example), you pass \"spaces/\" followed by the space name:
  • \n
\n\n
gr.Interface.load(\"spaces/eugenesiow/remove-bg\", \n                  inputs=\"webcam\", \n                  title=\"Remove your webcam background!\").launch()\n
\n\n

One of the great things about loading Hugging Face models or spaces using Gradio is that you can then immediately use the resulting Interface object just like a function in your Python code (this works for every type of model/space: text, images, audio, video, and even multimodal models):

\n\n
io = gr.Interface.load(\"models/EleutherAI/gpt-neo-2.7B\")\nio(\"It was the best of times\")  # outputs model completion\n
\n\n

Putting Interfaces in Parallel and Series

\n\n

Gradio also lets you mix interfaces very easily using the gradio.Parallel and gradio.Series classes. Parallel lets you put two similar models (if they have the same input type) in parallel to compare model predictions:

\n\n
generator1 = gr.Interface.load(\"huggingface/gpt2\")\ngenerator2 = gr.Interface.load(\"huggingface/EleutherAI/gpt-neo-2.7B\")\ngenerator3 = gr.Interface.load(\"huggingface/EleutherAI/gpt-j-6B\")\n\ngr.Parallel(generator1, generator2, generator3).launch()\n
\n\n

Series lets you put models and spaces in series, piping the output of one model into the input of the next model.

\n\n
generator = gr.Interface.load(\"huggingface/gpt2\")\ntranslator = gr.Interface.load(\"huggingface/t5-small\")\n\ngr.Series(generator, translator).launch()  \n# this demo generates text, then translates it to German, and outputs the final result.\n
\n\n

And of course, you can also mix Parallel and Series together whenever that makes sense!

\n\n

Learn more about Parallel and Series in the docs.

\n", - "tags": [], - "spaces": [], - "url": "/guides/advanced-interface-features/", - "contributor": null - } - ], - "parent": "gradio" - }, - "tabbedinterface": { - "class": null, - "name": "TabbedInterface", - "description": "A TabbedInterface is created by providing a list of Interfaces, each of which gets rendered in a separate tab.", - "tags": { "demos": "stt_or_tts" }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "interface_list", - "annotation": "list[Interface]", - "doc": "a list of interfaces to be rendered in tabs." - }, - { - "name": "tab_names", - "annotation": "list[str] | None", - "doc": "a list of tab names. If None, the tab names will be \"Tab 1\", \"Tab 2\", etc.", - "default": "None" - }, - { - "name": "title", - "annotation": "str | None", - "doc": "a title for the interface; if provided, appears above the input and output components in large font. Also used as the tab title when opened in a browser window.", - "default": "None" - }, - { - "name": "theme", - "annotation": "Theme | None", - "doc": null, - "default": "None" - }, - { - "name": "analytics_enabled", - "annotation": "bool | None", - "doc": "whether to allow basic telemetry. If None, will use GRADIO_ANALYTICS_ENABLED environment variable or default to True.", - "default": "None" - }, - { - "name": "css", - "annotation": "str | None", - "doc": "custom css or path to custom css file to apply to entire Blocks", - "default": "None" - } - ], - "returns": { - "annotation": null, - "doc": "a Gradio Tabbed Interface for the given interfaces" - }, - "example": null, - "fns": [], - "demos": [ - [ - "stt_or_tts", - "import gradio as gr\n\ntts_examples = [\n \"I love learning machine learning\",\n \"How do you do?\",\n]\n\ntts_demo = gr.load(\n \"huggingface/facebook/fastspeech2-en-ljspeech\",\n title=None,\n examples=tts_examples,\n description=\"Give me something to say!\",\n)\n\nstt_demo = gr.load(\n \"huggingface/facebook/wav2vec2-base-960h\",\n title=None,\n inputs=\"mic\",\n description=\"Let me try to guess what you're saying!\",\n)\n\ndemo = gr.TabbedInterface([tts_demo, stt_demo], [\"Text-to-speech\", \"Speech-to-text\"])\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ] - ], - "parent": "gradio" - }, - "parallel": { - "class": null, - "name": "Parallel", - "description": "Creates a new Interface consisting of multiple Interfaces in parallel (comparing their outputs). The Interfaces to put in Parallel must share the same input components (but can have different output components).
", - "tags": { - "demos": "interface_parallel, interface_parallel_load", - "guides": "advanced-interface-features" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "interfaces", - "annotation": "", - "doc": "any number of Interface objects that are to be compared in parallel" - }, - { - "name": "options", - "annotation": "", - "doc": "additional kwargs that are passed into the new Interface object to customize it", - "kwargs": true, - "args": true - } - ], - "returns": { - "annotation": null, - "doc": "an Interface object comparing the given models" - }, - "example": null, - "fns": [], - "demos": [ - [ - "interface_parallel", - "import gradio as gr\n\ngreeter_1 = gr.Interface(lambda name: f\"Hello {name}!\", inputs=\"textbox\", outputs=gr.Textbox(label=\"Greeter 1\"))\ngreeter_2 = gr.Interface(lambda name: f\"Greetings {name}!\", inputs=\"textbox\", outputs=gr.Textbox(label=\"Greeter 2\"))\ndemo = gr.Parallel(greeter_1, greeter_2)\n\nif __name__ == \"__main__\":\n demo.launch()" - ], - [ - "interface_parallel_load", - "import gradio as gr\n\ngenerator1 = gr.load(\"huggingface/gpt2\")\ngenerator2 = gr.load(\"huggingface/gpt2-xl\")\n\ndemo = gr.Parallel(generator1, generator2)\n\nif __name__ == \"__main__\":\n demo.launch()" - ] - ], - "guides": [ - { - "name": "advanced-interface-features", - "category": "building-interfaces", - "pretty_category": "Building Interfaces", - "guide_index": 4, - "absolute_index": 6, - "pretty_name": "Advanced Interface Features", - "content": "# Advanced Interface Features\n\nThere's more to cover on the [Interface](https://gradio.app/docs#interface) class. This guide covers all the advanced features: Using [Interpretation](https://gradio.app/docs#interpretation), custom styling, loading from the [Hugging Face Hub](https://hf.co), and using [Parallel](https://gradio.app/docs#parallel) and [Series](https://gradio.app/docs#series). \n\n## Interpreting your Predictions\n\nMost models are black boxes such that the internal logic of the function is hidden from the end user. To encourage transparency, we've made it very easy to add interpretation to your model by simply setting the `interpretation` keyword in the `Interface` class to `default`. This allows your users to understand what parts of the input are responsible for the output. Take a look at the simple interface below which shows an image classifier that also includes interpretation:\n\n```python\nimport requests\nimport tensorflow as tf\n\nimport gradio as gr\n\ninception_net = tf.keras.applications.MobileNetV2() # load the model\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\n\ndef classify_image(inp):\n inp = inp.reshape((-1, 224, 224, 3))\n inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)\n prediction = inception_net.predict(inp).flatten()\n return {labels[i]: float(prediction[i]) for i in range(1000)}\n\n\nimage = gr.Image(shape=(224, 224))\nlabel = gr.Label(num_top_classes=3)\n\ndemo = gr.Interface(\n fn=classify_image, inputs=image, outputs=label, interpretation=\"default\"\n)\n\ndemo.launch()\n\n```\n\n\nIn addition to `default`, Gradio also includes [Shapley-based interpretation](https://christophm.github.io/interpretable-ml-book/shap.html), which provides more accurate interpretations, albeit usually with a slower runtime. 
To use this, simply set the `interpretation` parameter to `\"shap\"` (note: also make sure the python package `shap` is installed). Optionally, you can modify the `num_shap` parameter, which controls the tradeoff between accuracy and runtime (increasing this value generally increases accuracy). Here is an example:\n\n```python\ngr.Interface(fn=classify_image,\n inputs=image, \n outputs=label, \n interpretation=\"shap\", \n num_shap=5).launch()\n```\n\nThis will work for any function, even if internally, the model is a complex neural network or some other black box. If you use Gradio's `default` or `shap` interpretation, the output component must be a `Label`. All common input components are supported. Here is an example with text input.\n\n```python\nimport gradio as gr\n\nmale_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n\n\ndef gender_of_sentence(sentence):\n male_count = len([word for word in sentence.split() if word.lower() in male_words])\n female_count = len(\n [word for word in sentence.split() if word.lower() in female_words]\n )\n total = max(male_count + female_count, 1)\n return {\"male\": male_count / total, \"female\": female_count / total}\n\n\ndemo = gr.Interface(\n fn=gender_of_sentence,\n inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n outputs=\"label\",\n interpretation=\"default\",\n)\n\ndemo.launch()\n\n```\n\nSo what is happening under the hood? With these interpretation methods, Gradio runs the prediction multiple times with modified versions of the input. Based on the results, you'll see that the interface automatically highlights the parts of the text (or image, etc.) that contributed increased the likelihood of the class as red. The intensity of color corresponds to the importance of that part of the input. The parts that decrease the class confidence are highlighted blue.\n\nYou can also write your own interpretation function. The demo below adds custom interpretation to the previous demo. This function will take the same inputs as the main wrapped function. The output of this interpretation function will be used to highlight the input of each input component - therefore the function must return a list where the number of elements corresponds to the number of input components. 
To see the format for interpretation for each input component, check the Docs.\n\n```python\nimport re\n\nimport gradio as gr\n\nmale_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n\n\ndef gender_of_sentence(sentence):\n male_count = len([word for word in sentence.split() if word.lower() in male_words])\n female_count = len(\n [word for word in sentence.split() if word.lower() in female_words]\n )\n total = max(male_count + female_count, 1)\n return {\"male\": male_count / total, \"female\": female_count / total}\n\n\n# Number of arguments to interpretation function must\n# match number of inputs to prediction function\ndef interpret_gender(sentence):\n result = gender_of_sentence(sentence)\n is_male = result[\"male\"] > result[\"female\"]\n interpretation = []\n for word in re.split(\"( )\", sentence):\n score = 0\n token = word.lower()\n if (is_male and token in male_words) or (not is_male and token in female_words):\n score = 1\n elif (is_male and token in female_words) or (\n not is_male and token in male_words\n ):\n score = -1\n interpretation.append((word, score))\n # Output must be a list of lists containing the same number of elements as inputs\n # Each element corresponds to the interpretation scores for the given input\n return [interpretation]\n\n\ndemo = gr.Interface(\n fn=gender_of_sentence,\n inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n outputs=\"label\",\n interpretation=interpret_gender,\n)\n\ndemo.launch()\n\n```\n\nLearn more about Interpretation in the [docs](https://gradio.app/docs#interpretation). \n\n## Custom Styling\n\nIf you'd like to have more fine-grained control over any aspect of your demo, you can also write your own css or pass in a filepath to a css file, with the `css` parameter of the `Interface` class.\n\n```python\ngr.Interface(..., css=\"body {background-color: red}\")\n```\n\nIf you'd like to reference external files in your css, preface the file path (which can be a relative or absolute path) with `\"file=\"`, for example:\n\n```python\ngr.Interface(..., css=\"body {background-image: url('file=clouds.jpg')}\")\n```\n\n**Warning**: Custom CSS is *not* guaranteed to work across Gradio versions as the Gradio HTML DOM may change. We recommend using custom CSS sparingly and instead using [Themes](/guides/theming-guide/) whenever possible. \n\n## Loading Hugging Face Models and Spaces\n\nGradio integrates nicely with the [Hugging Face Hub](https://hf.co), allowing you to load models and Spaces with just one line of code. To use this, simply use the `load()` method in the `Interface` class. 
So:\n\n- To load any model from the Hugging Face Hub and create an interface around it, you pass `\"model/\"` or `\"huggingface/\"` followed by the model name, like these examples:\n\n```python\ngr.Interface.load(\"huggingface/gpt2\").launch();\n```\n\n```python\ngr.Interface.load(\"huggingface/EleutherAI/gpt-j-6B\", \n inputs=gr.Textbox(lines=5, label=\"Input Text\") # customizes the input component\n).launch()\n```\n\n- To load any Space from the Hugging Face Hub and recreate it locally (so that you can customize the inputs and outputs for example), you pass `\"spaces/\"` followed by the model name:\n\n```python\ngr.Interface.load(\"spaces/eugenesiow/remove-bg\", \n inputs=\"webcam\", \n title=\"Remove your webcam background!\").launch()\n```\n\nOne of the great things about loading Hugging Face models or spaces using Gradio is that you can then immediately use the resulting `Interface` object just like function in your Python code (this works for every type of model/space: text, images, audio, video, and even multimodal models):\n\n```python\nio = gr.Interface.load(\"models/EleutherAI/gpt-neo-2.7B\")\nio(\"It was the best of times\") # outputs model completion\n```\n\n## Putting Interfaces in Parallel and Series\n\nGradio also lets you mix interfaces very easily using the `gradio.Parallel` and `gradio.Series` classes. `Parallel` lets you put two similar models (if they have the same input type) in parallel to compare model predictions:\n\n```python\ngenerator1 = gr.Interface.load(\"huggingface/gpt2\")\ngenerator2 = gr.Interface.load(\"huggingface/EleutherAI/gpt-neo-2.7B\")\ngenerator3 = gr.Interface.load(\"huggingface/EleutherAI/gpt-j-6B\")\n\ngr.Parallel(generator1, generator2, generator3).launch()\n```\n\n`Series` lets you put models and spaces in series, piping the output of one model into the input of the next model. \n\n```python\ngenerator = gr.Interface.load(\"huggingface/gpt2\")\ntranslator = gr.Interface.load(\"huggingface/t5-small\")\n\ngr.Series(generator, translator).launch() \n# this demo generates text, then translates it to German, and outputs the final result.\n```\n\nAnd of course, you can also mix `Parallel` and `Series` together whenever that makes sense!\n\nLearn more about Parallel and Series in the [docs](https://gradio.app/docs#parallel). ", - "html": "

Advanced Interface Features

\n\n

There's more to cover on the Interface class. This guide covers all the advanced features: Using Interpretation, custom styling, loading from the Hugging Face Hub, and using Parallel and Series.

\n\n

Interpreting your Predictions

\n\n

Most models are black boxes such that the internal logic of the function is hidden from the end user. To encourage transparency, we've made it very easy to add interpretation to your model by simply setting the interpretation keyword in the Interface class to default. This allows your users to understand what parts of the input are responsible for the output. Take a look at the simple interface below which shows an image classifier that also includes interpretation:

\n\n
import requests\nimport tensorflow as tf\n\nimport gradio as gr\n\ninception_net = tf.keras.applications.MobileNetV2()  # load the model\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\n\ndef classify_image(inp):\n    inp = inp.reshape((-1, 224, 224, 3))\n    inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)\n    prediction = inception_net.predict(inp).flatten()\n    return {labels[i]: float(prediction[i]) for i in range(1000)}\n\n\nimage = gr.Image(shape=(224, 224))\nlabel = gr.Label(num_top_classes=3)\n\ndemo = gr.Interface(\n    fn=classify_image, inputs=image, outputs=label, interpretation=\"default\"\n)\n\ndemo.launch()\n\n
\n\n

In addition to default, Gradio also includes Shapley-based interpretation, which provides more accurate interpretations, albeit usually with a slower runtime. To use this, simply set the interpretation parameter to \"shap\" (note: also make sure the python package shap is installed). Optionally, you can modify the num_shap parameter, which controls the tradeoff between accuracy and runtime (increasing this value generally increases accuracy). Here is an example:

\n\n
gr.Interface(fn=classify_image,\n            inputs=image, \n            outputs=label, \n            interpretation=\"shap\", \n            num_shap=5).launch()\n
\n\n

This will work for any function, even if internally, the model is a complex neural network or some other black box. If you use Gradio's default or shap interpretation, the output component must be a Label. All common input components are supported. Here is an example with text input.

\n\n
import gradio as gr\n\nmale_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n\n\ndef gender_of_sentence(sentence):\n    male_count = len([word for word in sentence.split() if word.lower() in male_words])\n    female_count = len(\n        [word for word in sentence.split() if word.lower() in female_words]\n    )\n    total = max(male_count + female_count, 1)\n    return {\"male\": male_count / total, \"female\": female_count / total}\n\n\ndemo = gr.Interface(\n    fn=gender_of_sentence,\n    inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n    outputs=\"label\",\n    interpretation=\"default\",\n)\n\ndemo.launch()\n\n
\n\n

So what is happening under the hood? With these interpretation methods, Gradio runs the prediction multiple times with modified versions of the input. Based on the results, you'll see that the interface automatically highlights, in red, the parts of the text (or image, etc.) that contributed to increasing the likelihood of the class. The intensity of color corresponds to the importance of that part of the input. The parts that decrease the class confidence are highlighted in blue.

\n\n

You can also write your own interpretation function. The demo below adds custom interpretation to the previous demo. This function will take the same inputs as the main wrapped function. The output of this interpretation function will be used to highlight the input of each input component - therefore the function must return a list where the number of elements corresponds to the number of input components. To see the format for interpretation for each input component, check the Docs.

\n\n
import re\n\nimport gradio as gr\n\nmale_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n\n\ndef gender_of_sentence(sentence):\n    male_count = len([word for word in sentence.split() if word.lower() in male_words])\n    female_count = len(\n        [word for word in sentence.split() if word.lower() in female_words]\n    )\n    total = max(male_count + female_count, 1)\n    return {\"male\": male_count / total, \"female\": female_count / total}\n\n\n# Number of arguments to interpretation function must\n# match number of inputs to prediction function\ndef interpret_gender(sentence):\n    result = gender_of_sentence(sentence)\n    is_male = result[\"male\"] > result[\"female\"]\n    interpretation = []\n    for word in re.split(\"( )\", sentence):\n        score = 0\n        token = word.lower()\n        if (is_male and token in male_words) or (not is_male and token in female_words):\n            score = 1\n        elif (is_male and token in female_words) or (\n            not is_male and token in male_words\n        ):\n            score = -1\n        interpretation.append((word, score))\n    # Output must be a list of lists containing the same number of elements as inputs\n    # Each element corresponds to the interpretation scores for the given input\n    return [interpretation]\n\n\ndemo = gr.Interface(\n    fn=gender_of_sentence,\n    inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n    outputs=\"label\",\n    interpretation=interpret_gender,\n)\n\ndemo.launch()\n\n
\n\n

Learn more about Interpretation in the docs.

\n\n

Custom Styling

\n\n

If you'd like to have more fine-grained control over any aspect of your demo, you can also write your own CSS or pass in a filepath to a CSS file using the css parameter of the Interface class.

\n\n
gr.Interface(..., css=\"body {background-color: red}\")\n
\n\n

If you'd like to reference external files in your css, preface the file path (which can be a relative or absolute path) with \"file=\", for example:

\n\n
gr.Interface(..., css=\"body {background-image: url('file=clouds.jpg')}\")\n
\n\n

Warning: Custom CSS is not guaranteed to work across Gradio versions as the Gradio HTML DOM may change. We recommend using custom CSS sparingly and instead using Themes whenever possible.

\n\n

Loading Hugging Face Models and Spaces

\n\n

Gradio integrates nicely with the Hugging Face Hub, allowing you to load models and Spaces with just one line of code. To use this, simply call the load() method of the Interface class. So:

\n\n
    \n
  • To load any model from the Hugging Face Hub and create an interface around it, you pass \"model/\" or \"huggingface/\" followed by the model name, like these examples:
  • \n
\n\n
gr.Interface.load(\"huggingface/gpt2\").launch();\n
\n\n
gr.Interface.load(\"huggingface/EleutherAI/gpt-j-6B\", \n    inputs=gr.Textbox(lines=5, label=\"Input Text\")  # customizes the input component\n).launch()\n
\n\n
    \n
  • To load any Space from the Hugging Face Hub and recreate it locally (so that you can customize the inputs and outputs, for example), you pass \"spaces/\" followed by the Space name:
  • \n
\n\n
gr.Interface.load(\"spaces/eugenesiow/remove-bg\", \n                  inputs=\"webcam\", \n                  title=\"Remove your webcam background!\").launch()\n
\n\n

One of the great things about loading Hugging Face models or spaces using Gradio is that you can then immediately use the resulting Interface object just like a function in your Python code (this works for every type of model/space: text, images, audio, video, and even multimodal models):

\n\n
io = gr.Interface.load(\"models/EleutherAI/gpt-neo-2.7B\")\nio(\"It was the best of times\")  # outputs model completion\n
\n\n

Putting Interfaces in Parallel and Series

\n\n

Gradio also lets you mix interfaces very easily using the gradio.Parallel and gradio.Series classes. Parallel lets you put two or more similar models (as long as they have the same input type) in parallel to compare model predictions:

\n\n
generator1 = gr.Interface.load(\"huggingface/gpt2\")\ngenerator2 = gr.Interface.load(\"huggingface/EleutherAI/gpt-neo-2.7B\")\ngenerator3 = gr.Interface.load(\"huggingface/EleutherAI/gpt-j-6B\")\n\ngr.Parallel(generator1, generator2, generator3).launch()\n
\n\n

Series lets you put models and spaces in series, piping the output of one model into the input of the next model.

\n\n
generator = gr.Interface.load(\"huggingface/gpt2\")\ntranslator = gr.Interface.load(\"huggingface/t5-small\")\n\ngr.Series(generator, translator).launch()  \n# this demo generates text, then translates it to German, and outputs the final result.\n
\n\n

And of course, you can also mix Parallel and Series together whenever that makes sense!

\n\n
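For example, here is a hedged sketch of one way to combine them, reusing the models loaded above: two generators are compared side by side, and each one's output is piped through the same translator. Treat this as an illustration of the pattern rather than a tested recipe:
\n\n
```python
import gradio as gr

# Illustrative sketch: each Series is itself an Interface, so the two generator
# pipelines can be placed side by side in a Parallel for comparison.
generator1 = gr.Interface.load('huggingface/gpt2')
generator2 = gr.Interface.load('huggingface/EleutherAI/gpt-neo-2.7B')
translator = gr.Interface.load('huggingface/t5-small')

gr.Parallel(
    gr.Series(generator1, translator),
    gr.Series(generator2, translator),
).launch()
```
\n\n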

Learn more about Parallel and Series in the docs.

\n", - "tags": [], - "spaces": [], - "url": "/guides/advanced-interface-features/", - "contributor": null - } - ], - "parent": "gradio" - }, - "series": { - "class": null, - "name": "Series", - "description": "Creates a new Interface from multiple Interfaces in series (the output of one is fed as the input to the next, and so the input and output components must agree between the interfaces).
", - "tags": { - "demos": "interface_series, interface_series_load", - "guides": "advanced-interface-features" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "interfaces", - "annotation": "", - "doc": "any number of Interface objects that are to be connected in series" - }, - { - "name": "options", - "annotation": "", - "doc": "additional kwargs that are passed into the new Interface object to customize it", - "kwargs": true, - "args": true - } - ], - "returns": { - "annotation": null, - "doc": "an Interface object connecting the given models" - }, - "example": null, - "fns": [], - "demos": [ - [ - "interface_series", - "import gradio as gr\n\nget_name = gr.Interface(lambda name: name, inputs=\"textbox\", outputs=\"textbox\")\nprepend_hello = gr.Interface(lambda name: f\"Hello {name}!\", inputs=\"textbox\", outputs=\"textbox\")\nappend_nice = gr.Interface(lambda greeting: f\"{greeting} Nice to meet you!\",\n inputs=\"textbox\", outputs=gr.Textbox(label=\"Greeting\"))\ndemo = gr.Series(get_name, prepend_hello, append_nice)\n\nif __name__ == \"__main__\":\n demo.launch()" - ], - [ - "interface_series_load", - "import gradio as gr\n\ngenerator = gr.load(\"huggingface/gpt2\")\ntranslator = gr.load(\"huggingface/t5-small\")\n\ndemo = gr.Series(generator, translator, description=\"This demo combines two Spaces: a text generator (`huggingface/gpt2`) and a text translator (`huggingface/t5-small`). The first Space takes a prompt as input and generates a text. The second Space takes the generated text as input and translates it into another language.\")\n\nif __name__ == \"__main__\":\n demo.launch()" - ] - ], - "guides": [ - { - "name": "advanced-interface-features", - "category": "building-interfaces", - "pretty_category": "Building Interfaces", - "guide_index": 4, - "absolute_index": 6, - "pretty_name": "Advanced Interface Features", - "content": "# Advanced Interface Features\n\nThere's more to cover on the [Interface](https://gradio.app/docs#interface) class. This guide covers all the advanced features: Using [Interpretation](https://gradio.app/docs#interpretation), custom styling, loading from the [Hugging Face Hub](https://hf.co), and using [Parallel](https://gradio.app/docs#parallel) and [Series](https://gradio.app/docs#series). \n\n## Interpreting your Predictions\n\nMost models are black boxes such that the internal logic of the function is hidden from the end user. To encourage transparency, we've made it very easy to add interpretation to your model by simply setting the `interpretation` keyword in the `Interface` class to `default`. This allows your users to understand what parts of the input are responsible for the output. 
Take a look at the simple interface below which shows an image classifier that also includes interpretation:\n\n```python\nimport requests\nimport tensorflow as tf\n\nimport gradio as gr\n\ninception_net = tf.keras.applications.MobileNetV2() # load the model\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\n\ndef classify_image(inp):\n inp = inp.reshape((-1, 224, 224, 3))\n inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)\n prediction = inception_net.predict(inp).flatten()\n return {labels[i]: float(prediction[i]) for i in range(1000)}\n\n\nimage = gr.Image(shape=(224, 224))\nlabel = gr.Label(num_top_classes=3)\n\ndemo = gr.Interface(\n fn=classify_image, inputs=image, outputs=label, interpretation=\"default\"\n)\n\ndemo.launch()\n\n```\n\n\nIn addition to `default`, Gradio also includes [Shapley-based interpretation](https://christophm.github.io/interpretable-ml-book/shap.html), which provides more accurate interpretations, albeit usually with a slower runtime. To use this, simply set the `interpretation` parameter to `\"shap\"` (note: also make sure the python package `shap` is installed). Optionally, you can modify the `num_shap` parameter, which controls the tradeoff between accuracy and runtime (increasing this value generally increases accuracy). Here is an example:\n\n```python\ngr.Interface(fn=classify_image,\n inputs=image, \n outputs=label, \n interpretation=\"shap\", \n num_shap=5).launch()\n```\n\nThis will work for any function, even if internally, the model is a complex neural network or some other black box. If you use Gradio's `default` or `shap` interpretation, the output component must be a `Label`. All common input components are supported. Here is an example with text input.\n\n```python\nimport gradio as gr\n\nmale_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n\n\ndef gender_of_sentence(sentence):\n male_count = len([word for word in sentence.split() if word.lower() in male_words])\n female_count = len(\n [word for word in sentence.split() if word.lower() in female_words]\n )\n total = max(male_count + female_count, 1)\n return {\"male\": male_count / total, \"female\": female_count / total}\n\n\ndemo = gr.Interface(\n fn=gender_of_sentence,\n inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n outputs=\"label\",\n interpretation=\"default\",\n)\n\ndemo.launch()\n\n```\n\nSo what is happening under the hood? With these interpretation methods, Gradio runs the prediction multiple times with modified versions of the input. Based on the results, you'll see that the interface automatically highlights the parts of the text (or image, etc.) that contributed increased the likelihood of the class as red. The intensity of color corresponds to the importance of that part of the input. The parts that decrease the class confidence are highlighted blue.\n\nYou can also write your own interpretation function. The demo below adds custom interpretation to the previous demo. This function will take the same inputs as the main wrapped function. The output of this interpretation function will be used to highlight the input of each input component - therefore the function must return a list where the number of elements corresponds to the number of input components. 
To see the format for interpretation for each input component, check the Docs.\n\n```python\nimport re\n\nimport gradio as gr\n\nmale_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n\n\ndef gender_of_sentence(sentence):\n male_count = len([word for word in sentence.split() if word.lower() in male_words])\n female_count = len(\n [word for word in sentence.split() if word.lower() in female_words]\n )\n total = max(male_count + female_count, 1)\n return {\"male\": male_count / total, \"female\": female_count / total}\n\n\n# Number of arguments to interpretation function must\n# match number of inputs to prediction function\ndef interpret_gender(sentence):\n result = gender_of_sentence(sentence)\n is_male = result[\"male\"] > result[\"female\"]\n interpretation = []\n for word in re.split(\"( )\", sentence):\n score = 0\n token = word.lower()\n if (is_male and token in male_words) or (not is_male and token in female_words):\n score = 1\n elif (is_male and token in female_words) or (\n not is_male and token in male_words\n ):\n score = -1\n interpretation.append((word, score))\n # Output must be a list of lists containing the same number of elements as inputs\n # Each element corresponds to the interpretation scores for the given input\n return [interpretation]\n\n\ndemo = gr.Interface(\n fn=gender_of_sentence,\n inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n outputs=\"label\",\n interpretation=interpret_gender,\n)\n\ndemo.launch()\n\n```\n\nLearn more about Interpretation in the [docs](https://gradio.app/docs#interpretation). \n\n## Custom Styling\n\nIf you'd like to have more fine-grained control over any aspect of your demo, you can also write your own css or pass in a filepath to a css file, with the `css` parameter of the `Interface` class.\n\n```python\ngr.Interface(..., css=\"body {background-color: red}\")\n```\n\nIf you'd like to reference external files in your css, preface the file path (which can be a relative or absolute path) with `\"file=\"`, for example:\n\n```python\ngr.Interface(..., css=\"body {background-image: url('file=clouds.jpg')}\")\n```\n\n**Warning**: Custom CSS is *not* guaranteed to work across Gradio versions as the Gradio HTML DOM may change. We recommend using custom CSS sparingly and instead using [Themes](/guides/theming-guide/) whenever possible. \n\n## Loading Hugging Face Models and Spaces\n\nGradio integrates nicely with the [Hugging Face Hub](https://hf.co), allowing you to load models and Spaces with just one line of code. To use this, simply use the `load()` method in the `Interface` class. 
So:\n\n- To load any model from the Hugging Face Hub and create an interface around it, you pass `\"model/\"` or `\"huggingface/\"` followed by the model name, like these examples:\n\n```python\ngr.Interface.load(\"huggingface/gpt2\").launch();\n```\n\n```python\ngr.Interface.load(\"huggingface/EleutherAI/gpt-j-6B\", \n inputs=gr.Textbox(lines=5, label=\"Input Text\") # customizes the input component\n).launch()\n```\n\n- To load any Space from the Hugging Face Hub and recreate it locally (so that you can customize the inputs and outputs for example), you pass `\"spaces/\"` followed by the model name:\n\n```python\ngr.Interface.load(\"spaces/eugenesiow/remove-bg\", \n inputs=\"webcam\", \n title=\"Remove your webcam background!\").launch()\n```\n\nOne of the great things about loading Hugging Face models or spaces using Gradio is that you can then immediately use the resulting `Interface` object just like function in your Python code (this works for every type of model/space: text, images, audio, video, and even multimodal models):\n\n```python\nio = gr.Interface.load(\"models/EleutherAI/gpt-neo-2.7B\")\nio(\"It was the best of times\") # outputs model completion\n```\n\n## Putting Interfaces in Parallel and Series\n\nGradio also lets you mix interfaces very easily using the `gradio.Parallel` and `gradio.Series` classes. `Parallel` lets you put two similar models (if they have the same input type) in parallel to compare model predictions:\n\n```python\ngenerator1 = gr.Interface.load(\"huggingface/gpt2\")\ngenerator2 = gr.Interface.load(\"huggingface/EleutherAI/gpt-neo-2.7B\")\ngenerator3 = gr.Interface.load(\"huggingface/EleutherAI/gpt-j-6B\")\n\ngr.Parallel(generator1, generator2, generator3).launch()\n```\n\n`Series` lets you put models and spaces in series, piping the output of one model into the input of the next model. \n\n```python\ngenerator = gr.Interface.load(\"huggingface/gpt2\")\ntranslator = gr.Interface.load(\"huggingface/t5-small\")\n\ngr.Series(generator, translator).launch() \n# this demo generates text, then translates it to German, and outputs the final result.\n```\n\nAnd of course, you can also mix `Parallel` and `Series` together whenever that makes sense!\n\nLearn more about Parallel and Series in the [docs](https://gradio.app/docs#parallel). ", - "html": "

Advanced Interface Features

\n\n

There's more to cover on the Interface class. This guide covers all the advanced features: Using Interpretation, custom styling, loading from the Hugging Face Hub, and using Parallel and Series.

\n\n

Interpreting your Predictions

\n\n

Most models are black boxes such that the internal logic of the function is hidden from the end user. To encourage transparency, we've made it very easy to add interpretation to your model by simply setting the interpretation keyword in the Interface class to default. This allows your users to understand what parts of the input are responsible for the output. Take a look at the simple interface below which shows an image classifier that also includes interpretation:

\n\n
import requests\nimport tensorflow as tf\n\nimport gradio as gr\n\ninception_net = tf.keras.applications.MobileNetV2()  # load the model\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\n\ndef classify_image(inp):\n    inp = inp.reshape((-1, 224, 224, 3))\n    inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)\n    prediction = inception_net.predict(inp).flatten()\n    return {labels[i]: float(prediction[i]) for i in range(1000)}\n\n\nimage = gr.Image(shape=(224, 224))\nlabel = gr.Label(num_top_classes=3)\n\ndemo = gr.Interface(\n    fn=classify_image, inputs=image, outputs=label, interpretation=\"default\"\n)\n\ndemo.launch()\n\n
\n\n

In addition to default, Gradio also includes Shapley-based interpretation, which provides more accurate interpretations, albeit usually with a slower runtime. To use this, simply set the interpretation parameter to \"shap\" (note: also make sure the python package shap is installed). Optionally, you can modify the num_shap parameter, which controls the tradeoff between accuracy and runtime (increasing this value generally increases accuracy). Here is an example:

\n\n
gr.Interface(fn=classify_image,\n            inputs=image, \n            outputs=label, \n            interpretation=\"shap\", \n            num_shap=5).launch()\n
\n\n

This will work for any function, even if, internally, the model is a complex neural network or some other black box. If you use Gradio's default or shap interpretation, the output component must be a Label. All common input components are supported. Here is an example with text input.

\n\n
import gradio as gr\n\nmale_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n\n\ndef gender_of_sentence(sentence):\n    male_count = len([word for word in sentence.split() if word.lower() in male_words])\n    female_count = len(\n        [word for word in sentence.split() if word.lower() in female_words]\n    )\n    total = max(male_count + female_count, 1)\n    return {\"male\": male_count / total, \"female\": female_count / total}\n\n\ndemo = gr.Interface(\n    fn=gender_of_sentence,\n    inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n    outputs=\"label\",\n    interpretation=\"default\",\n)\n\ndemo.launch()\n\n
\n\n

So what is happening under the hood? With these interpretation methods, Gradio runs the prediction multiple times with modified versions of the input. Based on the results, you'll see that the interface automatically highlights, in red, the parts of the text (or image, etc.) that contributed to increasing the likelihood of the class. The intensity of color corresponds to the importance of that part of the input. The parts that decrease the class confidence are highlighted in blue.

\n\n
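For intuition, here is a minimal, self-contained sketch of that leave-one-out idea, reusing the gender_of_sentence function from the demo above. This is not Gradio's internal implementation, and the helper leave_one_out_scores is purely illustrative; it only shows how re-running the prediction on perturbed inputs yields a per-word importance score:
\n\n
```python
# A minimal sketch (not Gradio's internal code) of leave-one-out interpretation:
# drop one word at a time, re-run the prediction, and record how much the
# predicted class's confidence changes.

male_words, female_words = ['he', 'his', 'him'], ['she', 'hers', 'her']

def gender_of_sentence(sentence):
    male_count = len([w for w in sentence.split() if w.lower() in male_words])
    female_count = len([w for w in sentence.split() if w.lower() in female_words])
    total = max(male_count + female_count, 1)
    return {'male': male_count / total, 'female': female_count / total}

def leave_one_out_scores(sentence, target='female'):
    words = sentence.split()
    base = gender_of_sentence(sentence)[target]
    scores = []
    for i, word in enumerate(words):
        perturbed = ' '.join(words[:i] + words[i + 1:])  # sentence with one word removed
        delta = base - gender_of_sentence(perturbed)[target]
        scores.append((word, delta))  # positive delta: the word supported the class
    return scores

print(leave_one_out_scores('She went to his house to get her keys.'))
```
\n\n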

You can also write your own interpretation function. The demo below adds custom interpretation to the previous demo. This function will take the same inputs as the main wrapped function. The output of this interpretation function will be used to highlight the input of each input component - therefore the function must return a list where the number of elements corresponds to the number of input components. To see the format for interpretation for each input component, check the Docs.

\n\n
import re\n\nimport gradio as gr\n\nmale_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n\n\ndef gender_of_sentence(sentence):\n    male_count = len([word for word in sentence.split() if word.lower() in male_words])\n    female_count = len(\n        [word for word in sentence.split() if word.lower() in female_words]\n    )\n    total = max(male_count + female_count, 1)\n    return {\"male\": male_count / total, \"female\": female_count / total}\n\n\n# Number of arguments to interpretation function must\n# match number of inputs to prediction function\ndef interpret_gender(sentence):\n    result = gender_of_sentence(sentence)\n    is_male = result[\"male\"] > result[\"female\"]\n    interpretation = []\n    for word in re.split(\"( )\", sentence):\n        score = 0\n        token = word.lower()\n        if (is_male and token in male_words) or (not is_male and token in female_words):\n            score = 1\n        elif (is_male and token in female_words) or (\n            not is_male and token in male_words\n        ):\n            score = -1\n        interpretation.append((word, score))\n    # Output must be a list of lists containing the same number of elements as inputs\n    # Each element corresponds to the interpretation scores for the given input\n    return [interpretation]\n\n\ndemo = gr.Interface(\n    fn=gender_of_sentence,\n    inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n    outputs=\"label\",\n    interpretation=interpret_gender,\n)\n\ndemo.launch()\n\n
\n\n

Learn more about Interpretation in the docs.

\n\n

Custom Styling

\n\n

If you'd like to have more fine-grained control over any aspect of your demo, you can also write your own CSS or pass in a filepath to a CSS file using the css parameter of the Interface class.

\n\n
gr.Interface(..., css=\"body {background-color: red}\")\n
\n\n

If you'd like to reference external files in your css, preface the file path (which can be a relative or absolute path) with \"file=\", for example:

\n\n
gr.Interface(..., css=\"body {background-image: url('file=clouds.jpg')}\")\n
\n\n

Warning: Custom CSS is not guaranteed to work across Gradio versions as the Gradio HTML DOM may change. We recommend using custom CSS sparingly and instead using Themes whenever possible.

\n\n

Loading Hugging Face Models and Spaces

\n\n

Gradio integrates nicely with the Hugging Face Hub, allowing you to load models and Spaces with just one line of code. To use this, simply call the load() method of the Interface class. So:

\n\n
    \n
  • To load any model from the Hugging Face Hub and create an interface around it, you pass \"model/\" or \"huggingface/\" followed by the model name, like these examples:
  • \n
\n\n
gr.Interface.load(\"huggingface/gpt2\").launch();\n
\n\n
gr.Interface.load(\"huggingface/EleutherAI/gpt-j-6B\", \n    inputs=gr.Textbox(lines=5, label=\"Input Text\")  # customizes the input component\n).launch()\n
\n\n
    \n
  • To load any Space from the Hugging Face Hub and recreate it locally (so that you can customize the inputs and outputs, for example), you pass \"spaces/\" followed by the Space name:
  • \n
\n\n
gr.Interface.load(\"spaces/eugenesiow/remove-bg\", \n                  inputs=\"webcam\", \n                  title=\"Remove your webcam background!\").launch()\n
\n\n

One of the great things about loading Hugging Face models or spaces using Gradio is that you can then immediately use the resulting Interface object just like a function in your Python code (this works for every type of model/space: text, images, audio, video, and even multimodal models):

\n\n
io = gr.Interface.load(\"models/EleutherAI/gpt-neo-2.7B\")\nio(\"It was the best of times\")  # outputs model completion\n
\n\n

Putting Interfaces in Parallel and Series

\n\n

Gradio also lets you mix interfaces very easily using the gradio.Parallel and gradio.Series classes. Parallel lets you put two or more similar models (as long as they have the same input type) in parallel to compare model predictions:

\n\n
generator1 = gr.Interface.load(\"huggingface/gpt2\")\ngenerator2 = gr.Interface.load(\"huggingface/EleutherAI/gpt-neo-2.7B\")\ngenerator3 = gr.Interface.load(\"huggingface/EleutherAI/gpt-j-6B\")\n\ngr.Parallel(generator1, generator2, generator3).launch()\n
\n\n

Series lets you put models and spaces in series, piping the output of one model into the input of the next model.

\n\n
generator = gr.Interface.load(\"huggingface/gpt2\")\ntranslator = gr.Interface.load(\"huggingface/t5-small\")\n\ngr.Series(generator, translator).launch()  \n# this demo generates text, then translates it to German, and outputs the final result.\n
\n\n

And of course, you can also mix Parallel and Series together whenever that makes sense!

\n\n
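For example, here is a hedged sketch of one way to combine them, reusing the models loaded above: two generators are compared side by side, and each one's output is piped through the same translator. Treat this as an illustration of the pattern rather than a tested recipe:
\n\n
```python
import gradio as gr

# Illustrative sketch: each Series is itself an Interface, so the two generator
# pipelines can be placed side by side in a Parallel for comparison.
generator1 = gr.Interface.load('huggingface/gpt2')
generator2 = gr.Interface.load('huggingface/EleutherAI/gpt-neo-2.7B')
translator = gr.Interface.load('huggingface/t5-small')

gr.Parallel(
    gr.Series(generator1, translator),
    gr.Series(generator2, translator),
).launch()
```
\n\n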

Learn more about Parallel and Series in the docs.

\n", - "tags": [], - "spaces": [], - "url": "/guides/advanced-interface-features/", - "contributor": null - } - ], - "parent": "gradio" - } - }, - "components": { - "annotatedimage": { - "class": null, - "name": "AnnotatedImage", - "description": "Displays a base image and colored subsections on top of that image. Subsections can take the from of rectangles (e.g. object detection) or masks (e.g. image segmentation).
", - "tags": { - "preprocessing": "this component does *not* accept input.", - "postprocessing": "expects a {Tuple[numpy.ndarray | PIL.Image | str, List[Tuple[numpy.ndarray | Tuple[int, int, int, int], str]]]} consisting of a base image and a list of subsections, that are either (x1, y1, x2, y2) tuples identifying object boundaries, or 0-1 confidence masks of the same shape as the image. A label is provided for each subsection.", - "demos": "image_segmentation" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "value", - "annotation": "tuple[np.ndarray | _Image.Image | str, list[tuple[np.ndarray | tuple[int, int, int, int], str]]] | None", - "doc": "Tuple of base image and list of (subsection, label) pairs.", - "default": "None" - }, - { - "name": "show_legend", - "annotation": "bool", - "doc": "If True, will show a legend of the subsections.", - "default": "True" - }, - { - "name": "height", - "annotation": "int | None", - "doc": "Height of the displayed image.", - "default": "None" - }, - { - "name": "width", - "annotation": "int | None", - "doc": "Width of the displayed image.", - "default": "None" - }, - { - "name": "color_map", - "annotation": "dict[str, str] | None", - "doc": "A dictionary mapping labels to colors. The colors must be specified as hex codes.", - "default": "None" - }, - { - "name": "label", - "annotation": "str | None", - "doc": "component name in interface.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", - "default": "None" - }, - { - "name": "show_label", - "annotation": "bool | None", - "doc": "if True, will display label.", - "default": "None" - }, - { - "name": "container", - "annotation": "bool", - "doc": "If True, will place the component in a container - providing some extra padding around the border.", - "default": "True" - }, - { - "name": "scale", - "annotation": "int | None", - "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", - "default": "None" - }, - { - "name": "min_width", - "annotation": "int", - "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", - "default": "160" - }, - { - "name": "visible", - "annotation": "bool", - "doc": "If False, component will be hidden.", - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "elem_classes", - "annotation": "list[str] | str | None", - "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [ - { - "fn": null, - "name": "select", - "description": "Event listener for when the user selects Image subsection. 
Uses event data gradio.SelectData to carry `value` referring to selected subsection label, and `index` to refer to subsection index. See EventData documentation on how to use this event data.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. 
For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.AnnotatedImage" - } - ], - "string_shortcuts": [ - ["AnnotatedImage", "annotatedimage", "Uses default values"] - ], - "demos": [ - [ - "image_segmentation", - "import gradio as gr\nimport numpy as np\nimport random\n\nwith gr.Blocks() as demo:\n section_labels = [\n \"apple\",\n \"banana\",\n \"carrot\",\n \"donut\",\n \"eggplant\",\n \"fish\",\n \"grapes\",\n \"hamburger\",\n \"ice cream\",\n \"juice\",\n ]\n\n with gr.Row():\n num_boxes = gr.Slider(0, 5, 2, step=1, label=\"Number of boxes\")\n num_segments = gr.Slider(0, 5, 1, step=1, label=\"Number of segments\")\n\n with gr.Row():\n img_input = gr.Image()\n img_output = gr.AnnotatedImage().style(\n color_map={\"banana\": \"#a89a00\", \"carrot\": \"#ffae00\"}\n )\n\n section_btn = gr.Button(\"Identify Sections\")\n selected_section = gr.Textbox(label=\"Selected Section\")\n\n def section(img, num_boxes, num_segments):\n sections = []\n for a in range(num_boxes):\n x = random.randint(0, img.shape[1])\n y = random.randint(0, img.shape[0])\n w = random.randint(0, img.shape[1] - x)\n h = random.randint(0, img.shape[0] - y)\n sections.append(((x, y, x + w, y + h), section_labels[a]))\n for b in range(num_segments):\n x = random.randint(0, img.shape[1])\n y = random.randint(0, img.shape[0])\n r = random.randint(0, min(x, y, img.shape[1] - x, img.shape[0] - y))\n mask = np.zeros(img.shape[:2])\n for i in range(img.shape[0]):\n for j in range(img.shape[1]):\n dist_square = (i - y) ** 2 + (j - x) ** 2\n if dist_square < r**2:\n mask[i, j] = round((r**2 - dist_square) / r**2 * 4) / 4\n sections.append((mask, section_labels[b + num_boxes]))\n return (img, sections)\n\n section_btn.click(section, [img_input, num_boxes, num_segments], img_output)\n\n def select_section(evt: gr.SelectData):\n return section_labels[evt.index]\n\n img_output.select(select_section, None, selected_section)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ] - ], - "preprocessing": "this component does *not* accept input.", - "postprocessing": "expects a Tuple[numpy.ndarray | PIL.Image | str, List[Tuple[numpy.ndarray | Tuple[int, int, int, int], str]]] consisting of a base image and a list of subsections, that are either (x1, y1, x2, y2) tuples identifying object boundaries, or 0-1 confidence masks of the same shape as the image. 
A label is provided for each subsection.", - "parent": "gradio", - "prev_obj": "Components", - "next_obj": "Audio" - }, - "audio": { - "class": null, - "name": "Audio", - "description": "Creates an audio component that can be used to upload/record audio (as an input) or display audio (as an output).", - "tags": { - "preprocessing": "passes the uploaded audio as a {Tuple(int, numpy.array)} corresponding to (sample rate in Hz, audio data as a 16-bit int array whose values range from -32768 to 32767), or as a {str} filepath, depending on `type`.", - "postprocessing": "expects a {Tuple(int, numpy.array)} corresponding to (sample rate in Hz, audio data as a float or int numpy array) or as a {str} or {pathlib.Path} filepath or URL to an audio file, which gets displayed", - "examples-format": "a {str} filepath to a local file that contains audio.", - "demos": "main_note, generate_tone, reverse_audio", - "guides": "real-time-speech-recognition" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "value", - "annotation": "str | Path | tuple[int, np.ndarray] | Callable | None", - "doc": "A path, URL, or [sample_rate, numpy array] tuple (sample rate in Hz, audio data as a float or int numpy array) for the default value that Audio component is going to take. If callable, the function will be called whenever the app loads to set the initial value of the component.", - "default": "None" - }, - { - "name": "source", - "annotation": "Literal['upload', 'microphone']", - "doc": "Source of audio. \"upload\" creates a box where user can drop an audio file, \"microphone\" creates a microphone input.", - "default": "\"upload\"" - }, - { - "name": "type", - "annotation": "Literal['numpy', 'filepath']", - "doc": "The format the audio file is converted to before being passed into the prediction function. \"numpy\" converts the audio to a tuple consisting of: (int sample rate, numpy.array for the data), \"filepath\" passes a str path to a temporary file containing the audio.", - "default": "\"numpy\"" - }, - { - "name": "label", - "annotation": "str | None", - "doc": "component name in interface.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", - "default": "None" - }, - { - "name": "show_label", - "annotation": "bool | None", - "doc": "if True, will display label.", - "default": "None" - }, - { - "name": "container", - "annotation": "bool", - "doc": "If True, will place the component in a container - providing some extra padding around the border.", - "default": "True" - }, - { - "name": "scale", - "annotation": "int | None", - "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", - "default": "None" - }, - { - "name": "min_width", - "annotation": "int", - "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. 
If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", - "default": "160" - }, - { - "name": "interactive", - "annotation": "bool | None", - "doc": "if True, will allow users to upload and edit a audio file; if False, can only be used to play audio. If not provided, this is inferred based on whether the component is used as an input or output.", - "default": "None" - }, - { - "name": "visible", - "annotation": "bool", - "doc": "If False, component will be hidden.", - "default": "True" - }, - { - "name": "streaming", - "annotation": "bool", - "doc": "If set to True when used in a `live` interface, will automatically stream webcam feed. Only valid is source is 'microphone'.", - "default": "False" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "elem_classes", - "annotation": "list[str] | str | None", - "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "format", - "annotation": "Literal['wav', 'mp3']", - "doc": "The file format to save audio files. Either 'wav' or 'mp3'. wav files are lossless but will tend to be larger files. mp3 files tend to be smaller. Default is wav. Applies both when this component is used as an input (when `type` is \"format\") and when this component is used as an output.", - "default": "\"wav\"" - }, - { - "name": "autoplay", - "annotation": "bool", - "doc": "Whether to automatically play the audio when the component is used as an output. Note: browsers will not autoplay audio files if the user has not interacted with the page yet.", - "default": "False" - }, - { - "name": "show_download_button", - "annotation": "", - "doc": "If True, will show a download button in the corner of the component for saving audio. If False, icon does not appear.", - "default": "True" - }, - { - "name": "show_share_button", - "annotation": "bool | None", - "doc": "If True, will show a share icon in the corner of the component that allows user to share outputs to Hugging Face Spaces Discussions. If False, icon does not appear. If set to None (default behavior), then the icon appears if this Gradio app is launched on Spaces, but not otherwise.", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [ - { - "fn": null, - "name": "change", - "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. 
If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Audio" - }, - { - "fn": null, - "name": "clear", - "description": "This listener is triggered when the user clears the component (e.g. image or audio) using the X button for the component. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Audio" - }, - { - "fn": null, - "name": "play", - "description": "This listener is triggered when the user plays the component (e.g. audio or video). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. 
If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Audio" - }, - { - "fn": null, - "name": "pause", - "description": "This listener is triggered when the media stops playing for any reason (e.g. audio or video). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Audio" - }, - { - "fn": null, - "name": "stop", - "description": "This listener is triggered when the user reaches the end of the media track (e.g. audio or video). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. 
If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Audio" - }, - { - "fn": null, - "name": "end", - "description": "This listener is triggered when the user reaches the end of the media track (e.g. audio or video). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Audio" - }, - { - "fn": null, - "name": "stream", - "description": "This listener is triggered when the user streams the component (e.g. a live webcam component). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. 
If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Audio" - }, - { - "fn": null, - "name": "start_recording", - "description": "This listener is triggered when the user starts recording with the component (e.g. audio or video). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Audio" - }, - { - "fn": null, - "name": "stop_recording", - "description": "This listener is triggered when the user stops recording with the component (e.g. audio or video). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. 
If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Audio" - }, - { - "fn": null, - "name": "upload", - "description": "This listener is triggered when the user uploads a file into the component (e.g. when the user uploads a video into a video component). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Audio" - } - ], - "string_shortcuts": [ - ["Audio", "audio", "Uses default values"], - ["Microphone", "microphone", "Uses source=\"microphone\""] - ], - "demos": [ - [ - "main_note", - "from math import log2, pow\nimport os\n\nimport numpy as np\nfrom scipy.fftpack import fft\n\nimport gradio as gr\n\nA4 = 440\nC0 = A4 * pow(2, -4.75)\nname = [\"C\", \"C#\", \"D\", \"D#\", \"E\", \"F\", \"F#\", \"G\", \"G#\", \"A\", \"A#\", \"B\"]\n\n\ndef get_pitch(freq):\n h = round(12 * log2(freq / C0))\n n = h % 12\n return name[n]\n\n\ndef main_note(audio):\n rate, y = audio\n if len(y.shape) == 2:\n y = y.T[0]\n N = len(y)\n T = 1.0 / rate\n yf = fft(y)\n yf2 = 2.0 / N * np.abs(yf[0 : N // 2])\n xf = np.linspace(0.0, 1.0 / (2.0 * T), N // 2)\n\n volume_per_pitch = {}\n total_volume = np.sum(yf2)\n for freq, volume in zip(xf, yf2):\n if freq == 0:\n continue\n pitch = get_pitch(freq)\n if pitch not in volume_per_pitch:\n volume_per_pitch[pitch] = 0\n volume_per_pitch[pitch] += 1.0 * volume / total_volume\n volume_per_pitch = {k: float(v) for k, v in volume_per_pitch.items()}\n return volume_per_pitch\n\n\ndemo = gr.Interface(\n main_note,\n gr.Audio(source=\"microphone\"),\n gr.Label(num_top_classes=4),\n examples=[\n [os.path.join(os.path.dirname(__file__),\"audio/recording1.wav\")],\n [os.path.join(os.path.dirname(__file__),\"audio/cantina.wav\")],\n ],\n interpretation=\"default\",\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ], - [ - "generate_tone", - "import numpy as np\nimport gradio as gr\n\nnotes = [\"C\", \"C#\", \"D\", \"D#\", \"E\", \"F\", \"F#\", \"G\", \"G#\", \"A\", \"A#\", \"B\"]\n\ndef generate_tone(note, octave, duration):\n sr = 48000\n a4_freq, tones_from_a4 = 440, 12 * (octave - 4) + (note - 9)\n frequency = a4_freq * 2 ** (tones_from_a4 / 12)\n duration = int(duration)\n audio = np.linspace(0, duration, duration * sr)\n audio = (20000 * np.sin(audio * (2 * np.pi * frequency))).astype(np.int16)\n return sr, audio\n\ndemo = gr.Interface(\n generate_tone,\n [\n gr.Dropdown(notes, type=\"index\"),\n gr.Slider(4, 6, step=1),\n gr.Textbox(value=1, label=\"Duration in seconds\"),\n ],\n \"audio\",\n)\nif __name__ == \"__main__\":\n demo.launch()\n" - ], - [ - "reverse_audio", - "import os\n\nimport numpy as np\n\nimport gradio as gr\n\n\ndef reverse_audio(audio):\n sr, data = audio\n return (sr, np.flipud(data))\n\n\ndemo = gr.Interface(fn=reverse_audio, \n inputs=\"microphone\", \n 
outputs=\"audio\", \n examples=[\n \"https://samplelib.com/lib/preview/mp3/sample-3s.mp3\",\n os.path.join(os.path.dirname(__file__), \"audio/recording1.wav\")\n ], cache_examples=True)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ] - ], - "guides": [ - { - "name": "real-time-speech-recognition", - "category": "other-tutorials", - "pretty_category": "Other Tutorials", - "guide_index": null, - "absolute_index": 40, - "pretty_name": "Real Time Speech Recognition", - "content": "# Real Time Speech Recognition \n\n\n\n\n## Introduction\n\nAutomatic speech recognition (ASR), the conversion of spoken speech to text, is a very important and thriving area of machine learning. ASR algorithms run on practically every smartphone, and are becoming increasingly embedded in professional workflows, such as digital assistants for nurses and doctors. Because ASR algorithms are designed to be used directly by customers and end users, it is important to validate that they are behaving as expected when confronted with a wide variety of speech patterns (different accents, pitches, and background audio conditions).\n\nUsing `gradio`, you can easily build a demo of your ASR model and share that with a testing team, or test it yourself by speaking through the microphone on your device.\n\nThis tutorial will show how to take a pretrained speech-to-text model and deploy it with a Gradio interface. We will start with a ***full-context*** model, in which the user speaks the entire audio before the prediction runs. Then we will adapt the demo to make it ***streaming***, meaning that the audio model will convert speech as you speak. The streaming demo that we create will look something like this (try it below or [in a new tab](https://huggingface.co/spaces/abidlabs/streaming-asr-paused)!):\n\n\n\nReal-time ASR is inherently *stateful*, meaning that the model's predictions change depending on what words the user previously spoke. So, in this tutorial, we will also cover how to use **state** with Gradio demos. \n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). You will also need a pretrained speech recognition model. In this tutorial, we will build demos from 2 ASR libraries:\n\n* Transformers (for this, `pip install transformers` and `pip install torch`) \n* DeepSpeech (`pip install deepspeech==0.8.2`)\n\nMake sure you have at least one of these installed so that you can follow along the tutorial. You will also need `ffmpeg` [installed on your system](https://www.ffmpeg.org/download.html), if you do not already have it, to process files from the microphone.\n\nHere's how to build a real time speech recognition (ASR) app: \n\n1. [Set up the Transformers ASR Model](#1-set-up-the-transformers-asr-model)\n2. [Create a Full-Context ASR Demo with Transformers](#2-create-a-full-context-asr-demo-with-transformers) \n3. [Create a Streaming ASR Demo with Transformers](#3-create-a-streaming-asr-demo-with-transformers)\n4. [Create a Streaming ASR Demo with DeepSpeech](#4-create-a-streaming-asr-demo-with-deep-speech)\n\n\n## 1. Set up the Transformers ASR Model\n\nFirst, you will need to have an ASR model that you have either trained yourself or you will need to download a pretrained model. In this tutorial, we will start by using a pretrained ASR model from the Hugging Face model, `Wav2Vec2`. 
\n\nHere is the code to load `Wav2Vec2` from Hugging Face `transformers`.\n\n```python\nfrom transformers import pipeline\n\np = pipeline(\"automatic-speech-recognition\")\n```\n\nThat's it! By default, the automatic speech recognition model pipeline loads Facebook's `facebook/wav2vec2-base-960h` model.\n\n## 2. Create a Full-Context ASR Demo with Transformers \n\nWe will start by creating a *full-context* ASR demo, in which the user speaks the full audio before using the ASR model to run inference. This is very easy with Gradio -- we simply create a function around the `pipeline` object above.\n\nWe will use `gradio`'s built in `Audio` component, configured to take input from the user's microphone and return a filepath for the recorded audio. The output component will be a plain `Textbox`.\n\n```python\nimport gradio as gr\n\ndef transcribe(audio):\n text = p(audio)[\"text\"]\n return text\n\ngr.Interface(\n fn=transcribe, \n inputs=gr.Audio(source=\"microphone\", type=\"filepath\"), \n outputs=\"text\").launch()\n```\n\nSo what's happening here? The `transcribe` function takes a single parameter, `audio`, which is a filepath to the audio file that the user has recorded. The `pipeline` object expects a filepath and converts it to text, which is returned to the frontend and displayed in a textbox. \n\nLet's see it in action! (Record a short audio clip and then click submit, or [open in a new tab](https://huggingface.co/spaces/abidlabs/full-context-asr)):\n\n\n\n## 3. Create a Streaming ASR Demo with Transformers\n\nOk great! We've built an ASR model that works well for short audio clips. However, if you are recording longer audio clips, you probably want a *streaming* interface, one that transcribes audio as the user speaks instead of just all-at-once at the end.\n\nThe good news is that it's not too difficult to adapt the demo we just made to make it streaming, using the same `Wav2Vec2` model. \n\nThe biggest change is that we must now introduce a `state` parameter, which holds the audio that has been *transcribed so far*. This allows us to only the latest chunk of audio and simply append it to the audio we previously transcribed. \n\nWhen adding state to a Gradio demo, you need to do a total of 3 things:\n\n* Add a `state` parameter to the function\n* Return the updated `state` at the end of the function\n* Add the `\"state\"` components to the `inputs` and `outputs` in `Interface` \n\nHere's what the code looks like:\n\n```python\ndef transcribe(audio, state=\"\"):\n text = p(audio)[\"text\"]\n state += text + \" \"\n return state, state\n\n# Set the starting state to an empty string\n\ngr.Interface(\n fn=transcribe, \n inputs=[\n gr.Audio(source=\"microphone\", type=\"filepath\", streaming=True), \n \"state\" \n ],\n outputs=[\n \"textbox\",\n \"state\"\n ],\n live=True).launch()\n```\n\nNotice that we've also made one other change, which is that we've set `live=True`. This keeps the Gradio interface running constantly, so it automatically transcribes audio without the user having to repeatedly hit the submit button.\n\nLet's see how it does (try below or [in a new tab](https://huggingface.co/spaces/abidlabs/streaming-asr))!\n\n\n\n\nOne thing that you may notice is that the transcription quality has dropped since the chunks of audio are so small, they lack the context to properly be transcribed. A \"hacky\" fix to this is to simply increase the runtime of the `transcribe()` function so that longer audio chunks are processed. 
We can do this by adding a `time.sleep()` inside the function, as shown below (we'll see a proper fix next) \n\n```python\nfrom transformers import pipeline\nimport gradio as gr\nimport time\n\np = pipeline(\"automatic-speech-recognition\")\n\ndef transcribe(audio, state=\"\"):\n time.sleep(2)\n text = p(audio)[\"text\"]\n state += text + \" \"\n return state, state\n\ngr.Interface(\n fn=transcribe, \n inputs=[\n gr.Audio(source=\"microphone\", type=\"filepath\", streaming=True), \n \"state\"\n ],\n outputs=[\n \"textbox\",\n \"state\"\n ],\n live=True).launch()\n```\n\nTry the demo below to see the difference (or [open in a new tab](https://huggingface.co/spaces/abidlabs/streaming-asr-paused))!\n\n\n\n\n## 4. Create a Streaming ASR Demo with DeepSpeech\n\nYou're not restricted to ASR models from the `transformers` library -- you can use your own models or models from other libraries. The `DeepSpeech` library contains models that are specifically designed to handle streaming audio data. These models perform really well with streaming data as they are able to account for previous chunks of audio data when making predictions.\n\nGoing through the DeepSpeech library is beyond the scope of this Guide (check out their [excellent documentation here](https://deepspeech.readthedocs.io/en/r0.9/)), but you can use Gradio very similarly with a DeepSpeech ASR model as with a Transformers ASR model. \n\nHere's a complete example (on Linux):\n\nFirst install the DeepSpeech library and download the pretrained models from the terminal:\n\n```bash\nwget https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-models.pbmm\nwget https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-models.scorer\napt install libasound2-dev portaudio19-dev libportaudio2 libportaudiocpp0 ffmpeg\npip install deepspeech==0.8.2\n```\n\nThen, create a similar `transcribe()` function as before:\n\n```python\nfrom deepspeech import Model\nimport numpy as np\n\nmodel_file_path = \"deepspeech-0.8.2-models.pbmm\"\nlm_file_path = \"deepspeech-0.8.2-models.scorer\"\nbeam_width = 100\nlm_alpha = 0.93\nlm_beta = 1.18\n\nmodel = Model(model_file_path)\nmodel.enableExternalScorer(lm_file_path)\nmodel.setScorerAlphaBeta(lm_alpha, lm_beta)\nmodel.setBeamWidth(beam_width)\n\n\ndef reformat_freq(sr, y):\n if sr not in (\n 48000,\n 16000,\n ): # Deepspeech only supports 16k, (we convert 48k -> 16k)\n raise ValueError(\"Unsupported rate\", sr)\n if sr == 48000:\n y = (\n ((y / max(np.max(y), 1)) * 32767)\n .reshape((-1, 3))\n .mean(axis=1)\n .astype(\"int16\")\n )\n sr = 16000\n return sr, y\n\n\ndef transcribe(speech, stream):\n _, y = reformat_freq(*speech)\n if stream is None:\n stream = model.createStream()\n stream.feedAudioContent(y)\n text = stream.intermediateDecode()\n return text, stream\n\n```\n\nThen, create a Gradio Interface as before (the only difference being that the return type should be `numpy` instead of a `filepath` to be compatible with the DeepSpeech models)\n\n```python\nimport gradio as gr\n\ngr.Interface(\n fn=transcribe, \n inputs=[\n gr.Audio(source=\"microphone\", type=\"numpy\"), \n \"state\" \n ], \n outputs= [\n \"text\", \n \"state\"\n ], \n live=True).launch()\n```\n\nRunning all of this should allow you to deploy your realtime ASR model with a nice GUI. Try it out and see how well it works for you.\n\n--------------------------------------------\n\n\nAnd you're done! That's all the code you need to build a web-based GUI for your ASR model. 
\n\nFun tip: you can share your ASR model instantly with others simply by setting `share=True` in `launch()`. \n\n\n", - "html": "

Real Time Speech Recognition

\n\n

Introduction

\n\n

Automatic speech recognition (ASR), the conversion of spoken speech to text, is a very important and thriving area of machine learning. ASR algorithms run on practically every smartphone, and are becoming increasingly embedded in professional workflows, such as digital assistants for nurses and doctors. Because ASR algorithms are designed to be used directly by customers and end users, it is important to validate that they are behaving as expected when confronted with a wide variety of speech patterns (different accents, pitches, and background audio conditions).

\n\n

Using gradio, you can easily build a demo of your ASR model and share that with a testing team, or test it yourself by speaking through the microphone on your device.

\n\n

This tutorial will show how to take a pretrained speech-to-text model and deploy it with a Gradio interface. We will start with a full-context model, in which the user speaks the entire audio before the prediction runs. Then we will adapt the demo to make it streaming, meaning that the audio model will convert speech as you speak. The streaming demo that we create will look something like this (try it below or in a new tab!):

\n\n\n\n

Real-time ASR is inherently stateful, meaning that the model's predictions change depending on what words the user previously spoke. So, in this tutorial, we will also cover how to use state with Gradio demos.

\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed. You will also need a pretrained speech recognition model. In this tutorial, we will build demos from 2 ASR libraries:

\n\n
    \n
  • Transformers (for this, pip install transformers and pip install torch)
  • DeepSpeech (pip install deepspeech==0.8.2)
\n\n

Make sure you have at least one of these installed so that you can follow along the tutorial. You will also need ffmpeg installed on your system, if you do not already have it, to process files from the microphone.

\n\n

Here's how to build a real time speech recognition (ASR) app:

\n\n
    \n
  1. Set up the Transformers ASR Model
  2. Create a Full-Context ASR Demo with Transformers
  3. Create a Streaming ASR Demo with Transformers
  4. Create a Streaming ASR Demo with DeepSpeech
\n\n

1. Set up the Transformers ASR Model

\n\n

First, you will need an ASR model: either one you have trained yourself or a pretrained model you download. In this tutorial, we will start with the pretrained Wav2Vec2 model from Hugging Face.

\n\n

Here is the code to load Wav2Vec2 from Hugging Face transformers.

\n\n
from transformers import pipeline\n\np = pipeline(\"automatic-speech-recognition\")\n
\n\n

That's it! By default, the automatic speech recognition model pipeline loads Facebook's facebook/wav2vec2-base-960h model.

\n\n

2. Create a Full-Context ASR Demo with Transformers

\n\n

We will start by creating a full-context ASR demo, in which the user speaks the full audio before using the ASR model to run inference. This is very easy with Gradio -- we simply create a function around the pipeline object above.

\n\n

We will use gradio's built-in Audio component, configured to take input from the user's microphone and return a filepath for the recorded audio. The output component will be a plain Textbox.

\n\n
import gradio as gr\n\ndef transcribe(audio):\n    text = p(audio)[\"text\"]\n    return text\n\ngr.Interface(\n    fn=transcribe, \n    inputs=gr.Audio(source=\"microphone\", type=\"filepath\"), \n    outputs=\"text\").launch()\n
\n\n

So what's happening here? The transcribe function takes a single parameter, audio, which is a filepath to the audio file that the user has recorded. The pipeline object expects a filepath and converts it to text, which is returned to the frontend and displayed in a textbox.

\n\n

Let's see it in action! (Record a short audio clip and then click submit, or open in a new tab):

\n\n\n\n

3. Create a Streaming ASR Demo with Transformers

\n\n

Ok great! We've built an ASR model that works well for short audio clips. However, if you are recording longer audio clips, you probably want a streaming interface, one that transcribes audio as the user speaks instead of just all-at-once at the end.

\n\n

The good news is that it's not too difficult to adapt the demo we just made to make it streaming, using the same Wav2Vec2 model.

\n\n

The biggest change is that we must now introduce a state parameter, which holds the audio that has been transcribed so far. This allows us to process only the latest chunk of audio and simply append it to the audio we previously transcribed.

\n\n

When adding state to a Gradio demo, you need to do a total of 3 things:

\n\n
    \n
  • Add a state parameter to the function
  • Return the updated state at the end of the function
  • Add the \"state\" components to the inputs and outputs in Interface
\n\n

Here's what the code looks like:

\n\n
def transcribe(audio, state=\"\"):\n    text = p(audio)[\"text\"]\n    state += text + \" \"\n    return state, state\n\n# Set the starting state to an empty string\n\ngr.Interface(\n    fn=transcribe, \n    inputs=[\n        gr.Audio(source=\"microphone\", type=\"filepath\", streaming=True), \n        \"state\" \n    ],\n    outputs=[\n        \"textbox\",\n        \"state\"\n    ],\n    live=True).launch()\n
\n\n

Notice that we've also made one other change, which is that we've set live=True. This keeps the Gradio interface running constantly, so it automatically transcribes audio without the user having to repeatedly hit the submit button.

\n\n

Let's see how it does (try below or in a new tab)!

\n\n\n\n

One thing that you may notice is that the transcription quality has dropped: because the chunks of audio are so small, they lack the context needed to be transcribed properly. A \"hacky\" fix is to increase the runtime of the transcribe() function so that longer audio chunks are processed. We can do this by adding a time.sleep() inside the function, as shown below (we'll see a proper fix next).

\n\n
from transformers import pipeline\nimport gradio as gr\nimport time\n\np = pipeline(\"automatic-speech-recognition\")\n\ndef transcribe(audio, state=\"\"):\n    time.sleep(2)\n    text = p(audio)[\"text\"]\n    state += text + \" \"\n    return state, state\n\ngr.Interface(\n    fn=transcribe, \n    inputs=[\n        gr.Audio(source=\"microphone\", type=\"filepath\", streaming=True), \n        \"state\"\n    ],\n    outputs=[\n        \"textbox\",\n        \"state\"\n    ],\n    live=True).launch()\n
\n\n

Try the demo below to see the difference (or open in a new tab)!

\n\n\n\n

4. Create a Streaming ASR Demo with DeepSpeech

\n\n

You're not restricted to ASR models from the transformers library -- you can use your own models or models from other libraries. The DeepSpeech library contains models that are specifically designed to handle streaming audio data. These models perform really well with streaming data as they are able to account for previous chunks of audio data when making predictions.

\n\n

Going through the DeepSpeech library is beyond the scope of this Guide (check out their excellent documentation here), but you can use Gradio very similarly with a DeepSpeech ASR model as with a Transformers ASR model.

\n\n

Here's a complete example (on Linux):

\n\n

First install the DeepSpeech library and download the pretrained models from the terminal:

\n\n
wget https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-models.pbmm\nwget https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-models.scorer\napt install libasound2-dev portaudio19-dev libportaudio2 libportaudiocpp0 ffmpeg\npip install deepspeech==0.8.2\n
\n\n

Then, create a similar transcribe() function as before:

\n\n
from deepspeech import Model\nimport numpy as np\n\nmodel_file_path = \"deepspeech-0.8.2-models.pbmm\"\nlm_file_path = \"deepspeech-0.8.2-models.scorer\"\nbeam_width = 100\nlm_alpha = 0.93\nlm_beta = 1.18\n\nmodel = Model(model_file_path)\nmodel.enableExternalScorer(lm_file_path)\nmodel.setScorerAlphaBeta(lm_alpha, lm_beta)\nmodel.setBeamWidth(beam_width)\n\n\ndef reformat_freq(sr, y):\n    if sr not in (\n        48000,\n        16000,\n    ):  # Deepspeech only supports 16k, (we convert 48k -> 16k)\n        raise ValueError(\"Unsupported rate\", sr)\n    if sr == 48000:\n        y = (\n            ((y / max(np.max(y), 1)) * 32767)\n            .reshape((-1, 3))\n            .mean(axis=1)\n            .astype(\"int16\")\n        )\n        sr = 16000\n    return sr, y\n\n\ndef transcribe(speech, stream):\n    _, y = reformat_freq(*speech)\n    if stream is None:\n        stream = model.createStream()\n    stream.feedAudioContent(y)\n    text = stream.intermediateDecode()\n    return text, stream\n\n
\n\n

Then, create a Gradio Interface as before (the only difference being that the return type should be numpy instead of a filepath to be compatible with the DeepSpeech models)

\n\n
import gradio as gr\n\ngr.Interface(\n    fn=transcribe, \n    inputs=[\n        gr.Audio(source=\"microphone\", type=\"numpy\"), \n        \"state\" \n    ], \n    outputs= [\n        \"text\", \n        \"state\"\n    ], \n    live=True).launch()\n
\n\n

Running all of this should allow you to deploy your realtime ASR model with a nice GUI. Try it out and see how well it works for you.

\n\n
\n\n

And you're done! That's all the code you need to build a web-based GUI for your ASR model.

\n\n

Fun tip: you can share your ASR model instantly with others simply by setting share=True in launch().

\n", - "tags": ["ASR", "SPEECH", "STREAMING"], - "spaces": [ - "https://huggingface.co/spaces/abidlabs/streaming-asr-paused", - "https://huggingface.co/spaces/abidlabs/full-context-asr" - ], - "url": "/guides/real-time-speech-recognition/", - "contributor": null - } - ], - "preprocessing": "passes the uploaded audio as a Tuple(int, numpy.array) corresponding to (sample rate in Hz, audio data as a 16-bit int array whose values range from -32768 to 32767), or as a str filepath, depending on `type`.", - "postprocessing": "expects a Tuple(int, numpy.array) corresponding to (sample rate in Hz, audio data as a float or int numpy array) or as a str or pathlib.Path filepath or URL to an audio file, which gets displayed", - "examples-format": "a str filepath to a local file that contains audio.", - "parent": "gradio", - "prev_obj": "AnnotatedImage", - "next_obj": "BarPlot" - }, - "barplot": { - "class": null, - "name": "BarPlot", - "description": "Create a bar plot.

", - "tags": { - "preprocessing": "this component does *not* accept input.", - "postprocessing": "expects a pandas dataframe with the data to plot.", - "demos": "bar_plot, chicago-bikeshare-dashboard" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "value", - "annotation": "pd.DataFrame | Callable | None", - "doc": "The pandas dataframe containing the data to display in a scatter plot.", - "default": "None" - }, - { - "name": "x", - "annotation": "str | None", - "doc": "Column corresponding to the x axis.", - "default": "None" - }, - { - "name": "y", - "annotation": "str | None", - "doc": "Column corresponding to the y axis.", - "default": "None" - }, - { - "name": "color", - "annotation": "str | None", - "doc": "The column to determine the bar color. Must be categorical (discrete values).", - "default": "None" - }, - { - "name": "vertical", - "annotation": "bool", - "doc": "If True, the bars will be displayed vertically. If False, the x and y axis will be switched, displaying the bars horizontally. Default is True.", - "default": "True" - }, - { - "name": "group", - "annotation": "str | None", - "doc": "The column with which to split the overall plot into smaller subplots.", - "default": "None" - }, - { - "name": "title", - "annotation": "str | None", - "doc": "The title to display on top of the chart.", - "default": "None" - }, - { - "name": "tooltip", - "annotation": "list[str] | str | None", - "doc": "The column (or list of columns) to display on the tooltip when a user hovers over a bar.", - "default": "None" - }, - { - "name": "x_title", - "annotation": "str | None", - "doc": "The title given to the x axis. By default, uses the value of the x parameter.", - "default": "None" - }, - { - "name": "y_title", - "annotation": "str | None", - "doc": "The title given to the y axis. By default, uses the value of the y parameter.", - "default": "None" - }, - { - "name": "color_legend_title", - "annotation": "str | None", - "doc": "The title given to the color legend. By default, uses the value of color parameter.", - "default": "None" - }, - { - "name": "group_title", - "annotation": "str | None", - "doc": "The label displayed on top of the subplot columns (or rows if vertical=True). Use an empty string to omit.", - "default": "None" - }, - { - "name": "color_legend_position", - "annotation": "Literal['left', 'right', 'top', 'bottom', 'top-left', 'top-right', 'bottom-left', 'bottom-right', 'none'] | None", - "doc": "The position of the color legend. If the string value 'none' is passed, this legend is omitted. 
For other valid position values see: https://vega.github.io/vega/docs/legends/#orientation.", - "default": "None" - }, - { - "name": "height", - "annotation": "int | None", - "doc": "The height of the plot in pixels.", - "default": "None" - }, - { - "name": "width", - "annotation": "int | None", - "doc": "The width of the plot in pixels.", - "default": "None" - }, - { - "name": "y_lim", - "annotation": "list[int] | None", - "doc": "A tuple of list containing the limits for the y-axis, specified as [y_min, y_max].", - "default": "None" - }, - { - "name": "caption", - "annotation": "str | None", - "doc": "The (optional) caption to display below the plot.", - "default": "None" - }, - { - "name": "interactive", - "annotation": "bool | None", - "doc": "Whether users should be able to interact with the plot by panning or zooming with their mouse or trackpad.", - "default": "True" - }, - { - "name": "label", - "annotation": "str | None", - "doc": "The (optional) label to display on the top left corner of the plot.", - "default": "None" - }, - { - "name": "show_label", - "annotation": "bool | None", - "doc": "Whether the label should be displayed.", - "default": "None" - }, - { - "name": "container", - "annotation": "bool", - "doc": null, - "default": "True" - }, - { - "name": "scale", - "annotation": "int | None", - "doc": null, - "default": "None" - }, - { - "name": "min_width", - "annotation": "int", - "doc": null, - "default": "160" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", - "default": "None" - }, - { - "name": "visible", - "annotation": "bool", - "doc": "Whether the plot should be visible.", - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "elem_classes", - "annotation": "list[str] | str | None", - "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [ - { - "fn": null, - "name": "change", - "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. 
If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.BarPlot" - }, - { - "fn": null, - "name": "clear", - "description": "This listener is triggered when the user clears the component (e.g. image or audio) using the X button for the component. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.BarPlot" - } - ], - "string_shortcuts": [["BarPlot", "barplot", "Uses default values"]], - "demos": [ - [ - "bar_plot", - "import gradio as gr\nimport pandas as pd\nimport random\n\nsimple = pd.DataFrame(\n {\n \"a\": [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\"],\n \"b\": [28, 55, 43, 91, 81, 53, 19, 87, 52],\n }\n)\n\nfake_barley = pd.DataFrame(\n {\n \"site\": [\n random.choice(\n [\n \"University Farm\",\n \"Waseca\",\n \"Morris\",\n \"Crookston\",\n \"Grand Rapids\",\n \"Duluth\",\n ]\n )\n for _ in range(120)\n ],\n \"yield\": [random.randint(25, 75) for _ in range(120)],\n \"variety\": [\n random.choice(\n [\n \"Manchuria\",\n \"Wisconsin No. 38\",\n \"Glabron\",\n \"No. 457\",\n \"No. 462\",\n \"No. 
475\",\n ]\n )\n for _ in range(120)\n ],\n \"year\": [\n random.choice(\n [\n \"1931\",\n \"1932\",\n ]\n )\n for _ in range(120)\n ],\n }\n)\n\n\ndef bar_plot_fn(display):\n if display == \"simple\":\n return gr.BarPlot.update(\n simple,\n x=\"a\",\n y=\"b\",\n title=\"Simple Bar Plot with made up data\",\n tooltip=[\"a\", \"b\"],\n y_lim=[20, 100],\n )\n elif display == \"stacked\":\n return gr.BarPlot.update(\n fake_barley,\n x=\"variety\",\n y=\"yield\",\n color=\"site\",\n title=\"Barley Yield Data\",\n tooltip=[\"variety\", \"site\"],\n )\n elif display == \"grouped\":\n return gr.BarPlot.update(\n fake_barley.astype({\"year\": str}),\n x=\"year\",\n y=\"yield\",\n color=\"year\",\n group=\"site\",\n title=\"Barley Yield by Year and Site\",\n group_title=\"\",\n tooltip=[\"yield\", \"site\", \"year\"],\n )\n elif display == \"simple-horizontal\":\n return gr.BarPlot.update(\n simple,\n x=\"a\",\n y=\"b\",\n x_title=\"Variable A\",\n y_title=\"Variable B\",\n title=\"Simple Bar Plot with made up data\",\n tooltip=[\"a\", \"b\"],\n vertical=False,\n y_lim=[20, 100],\n )\n elif display == \"stacked-horizontal\":\n return gr.BarPlot.update(\n fake_barley,\n x=\"variety\",\n y=\"yield\",\n color=\"site\",\n title=\"Barley Yield Data\",\n vertical=False,\n tooltip=[\"variety\", \"site\"],\n )\n elif display == \"grouped-horizontal\":\n return gr.BarPlot.update(\n fake_barley.astype({\"year\": str}),\n x=\"year\",\n y=\"yield\",\n color=\"year\",\n group=\"site\",\n title=\"Barley Yield by Year and Site\",\n group_title=\"\",\n tooltip=[\"yield\", \"site\", \"year\"],\n vertical=False,\n )\n\n\nwith gr.Blocks() as bar_plot:\n with gr.Row():\n with gr.Column():\n display = gr.Dropdown(\n choices=[\n \"simple\",\n \"stacked\",\n \"grouped\",\n \"simple-horizontal\",\n \"stacked-horizontal\",\n \"grouped-horizontal\",\n ],\n value=\"simple\",\n label=\"Type of Bar Plot\",\n )\n with gr.Column():\n plot = gr.BarPlot()\n display.change(bar_plot_fn, inputs=display, outputs=plot)\n bar_plot.load(fn=bar_plot_fn, inputs=display, outputs=plot)\n\nbar_plot.launch()\n" - ], - [ - "chicago-bikeshare-dashboard", - "import os\nimport gradio as gr\nimport pandas as pd\n\nDB_USER = os.getenv(\"DB_USER\")\nDB_PASSWORD = os.getenv(\"DB_PASSWORD\")\nDB_HOST = os.getenv(\"DB_HOST\")\nPORT = 8080\nDB_NAME = \"bikeshare\"\n\nconnection_string = (\n f\"postgresql://{DB_USER}:{DB_PASSWORD}@{DB_HOST}?port={PORT}&dbname={DB_NAME}\"\n)\n\n\ndef get_count_ride_type():\n df = pd.read_sql(\n \"\"\"\n SELECT COUNT(ride_id) as n, rideable_type\n FROM rides\n GROUP BY rideable_type\n ORDER BY n DESC\n \"\"\",\n con=connection_string,\n )\n return df\n\n\ndef get_most_popular_stations():\n\n df = pd.read_sql(\n \"\"\"\n SELECT COUNT(ride_id) as n, MAX(start_station_name) as station\n FROM RIDES\n WHERE start_station_name is NOT NULL\n GROUP BY start_station_id\n ORDER BY n DESC\n LIMIT 5\n \"\"\",\n con=connection_string,\n )\n return df\n\n\nwith gr.Blocks() as demo:\n gr.Markdown(\n \"\"\"\n # Chicago Bike Share Dashboard\n \n This demo pulls Chicago bike share data for March 2022 from a postgresql database hosted on AWS.\n This demo uses psycopg2 but any postgresql client library (SQLAlchemy)\n is compatible with gradio.\n \n Connection credentials are handled by environment variables\n defined as secrets in the Space.\n\n If data were added to the database, the plots in this demo would update\n whenever the webpage is reloaded.\n \n This demo serves as a starting point for your database-connected apps!\n \"\"\"\n )\n 
with gr.Row():\n bike_type = gr.BarPlot(\n x=\"rideable_type\",\n y='n',\n title=\"Number of rides per bicycle type\",\n y_title=\"Number of Rides\",\n x_title=\"Bicycle Type\",\n vertical=False,\n tooltip=['rideable_type', \"n\"],\n height=300,\n width=300,\n )\n station = gr.BarPlot(\n x='station',\n y='n',\n title=\"Most Popular Stations\",\n y_title=\"Number of Rides\",\n x_title=\"Station Name\",\n vertical=False,\n tooltip=['station', 'n'],\n height=300,\n width=300\n )\n\n demo.load(get_count_ride_type, inputs=None, outputs=bike_type)\n demo.load(get_most_popular_stations, inputs=None, outputs=station)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ] - ], - "preprocessing": "this component does *not* accept input.", - "postprocessing": "expects a pandas dataframe with the data to plot.", - "parent": "gradio", - "prev_obj": "Audio", - "next_obj": "Button" - }, - "button": { - "class": null, - "name": "Button", - "description": "Used to create a button, that can be assigned arbitrary click() events. The label (value) of the button can be used as an input or set via the output of a function.
", - "tags": { - "preprocessing": "passes the button value as a {str} into the function", - "postprocessing": "expects a {str} to be returned from a function, which is set as the label of the button", - "demos": "blocks_inputs, blocks_kinematics" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "value", - "annotation": "str | Callable", - "doc": "Default text for the button to display. If callable, the function will be called whenever the app loads to set the initial value of the component.", - "default": "\"Run\"" - }, - { - "name": "variant", - "annotation": "Literal['primary', 'secondary', 'stop']", - "doc": "'primary' for main call-to-action, 'secondary' for a more subdued style, 'stop' for a stop button.", - "default": "\"secondary\"" - }, - { - "name": "size", - "annotation": "Literal['sm', 'lg'] | None", - "doc": "Size of the button. Can be \"sm\" or \"lg\".", - "default": "None" - }, - { - "name": "visible", - "annotation": "bool", - "doc": "If False, component will be hidden.", - "default": "True" - }, - { - "name": "interactive", - "annotation": "bool", - "doc": "If False, the Button will be in a disabled state.", - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "elem_classes", - "annotation": "list[str] | str | None", - "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "scale", - "annotation": "int | None", - "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", - "default": "None" - }, - { - "name": "min_width", - "annotation": "int | None", - "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [ - { - "fn": null, - "name": "click", - "description": "This listener is triggered when the component (e.g. a button) is clicked. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. 
Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Button" - } - ], - "string_shortcuts": [ - ["Button", "button", "Uses default values"], - ["ClearButton", "clearbutton", "Uses default values"], - ["DuplicateButton", "duplicatebutton", "Uses default values"] - ], - "demos": [ - [ - "blocks_inputs", - "import gradio as gr\nimport os\n\n\ndef combine(a, b):\n return a + \" \" + b\n\n\ndef mirror(x):\n return x\n\n\nwith gr.Blocks() as demo:\n\n txt = gr.Textbox(label=\"Input\", lines=2)\n txt_2 = gr.Textbox(label=\"Input 2\")\n txt_3 = gr.Textbox(value=\"\", label=\"Output\")\n btn = gr.Button(value=\"Submit\")\n btn.click(combine, inputs=[txt, txt_2], outputs=[txt_3])\n\n with gr.Row():\n im = gr.Image()\n im_2 = gr.Image()\n\n btn = gr.Button(value=\"Mirror Image\")\n btn.click(mirror, inputs=[im], outputs=[im_2])\n\n gr.Markdown(\"## Text Examples\")\n gr.Examples(\n [[\"hi\", \"Adam\"], [\"hello\", \"Eve\"]],\n [txt, txt_2],\n txt_3,\n combine,\n cache_examples=True,\n )\n gr.Markdown(\"## Image Examples\")\n gr.Examples(\n examples=[os.path.join(os.path.dirname(__file__), \"lion.jpg\")],\n inputs=im,\n outputs=im_2,\n fn=mirror,\n cache_examples=True,\n )\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ], - [ - "blocks_kinematics", - "import pandas as pd\nimport numpy as np\n\nimport gradio as gr\n\n\ndef plot(v, a):\n g = 9.81\n theta = a / 180 * 3.14\n tmax = ((2 * v) * np.sin(theta)) / g\n timemat = tmax * np.linspace(0, 1, 40)\n\n x = (v * timemat) * np.cos(theta)\n y = ((v * timemat) * np.sin(theta)) - ((0.5 * g) * (timemat**2))\n df = pd.DataFrame({\"x\": x, \"y\": y})\n return df\n\n\ndemo = gr.Blocks()\n\nwith demo:\n gr.Markdown(\n r\"Let's do some kinematics! Choose the speed and angle to see the trajectory. Remember that the range $R = v_0^2 \\cdot \\frac{\\sin(2\\theta)}{g}$\"\n )\n\n with gr.Row():\n speed = gr.Slider(1, 30, 25, label=\"Speed\")\n angle = gr.Slider(0, 90, 45, label=\"Angle\")\n output = gr.LinePlot(\n x=\"x\",\n y=\"y\",\n overlay_point=True,\n tooltip=[\"x\", \"y\"],\n x_lim=[0, 100],\n y_lim=[0, 60],\n width=350,\n height=300,\n )\n btn = gr.Button(value=\"Run\")\n btn.click(plot, [speed, angle], output)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ] - ], - "preprocessing": "passes the button value as a str into the function", - "postprocessing": "expects a str to be returned from a function, which is set as the label of the button", - "parent": "gradio", - "prev_obj": "BarPlot", - "next_obj": "Chatbot" - }, - "chatbot": { - "class": null, - "name": "Chatbot", - "description": "Displays a chatbot output showing both user submitted messages and responses. Supports a subset of Markdown including bold, italics, code, tables. Also supports audio/video/image files, which are displayed in the Chatbot, and other kinds of files which are displayed as links.
", - "tags": { - "preprocessing": "passes the messages in the Chatbot as a {List[List[str | None | Tuple]]}, i.e. a list of lists. The inner list has 2 elements: the user message and the response message. See `Postprocessing` for the format of these messages.", - "postprocessing": "expects function to return a {List[List[str | None | Tuple]]}, i.e. a list of lists. The inner list should have 2 elements: the user message and the response message. The individual messages can be (1) strings in valid Markdown, (2) tuples if sending files: (a filepath or URL to a file, [optional string alt text]) -- if the file is image/video/audio, it is displayed in the Chatbot, or (3) None, in which case the message is not displayed.", - "demos": "chatbot_simple, chatbot_multimodal", - "guides": "creating-a-chatbot" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "value", - "annotation": "list[list[str | tuple[str] | tuple[str | Path, str] | None]] | Callable | None", - "doc": "Default value to show in chatbot. If callable, the function will be called whenever the app loads to set the initial value of the component.", - "default": "None" - }, - { - "name": "color_map", - "annotation": "dict[str, str] | None", - "doc": "This parameter is deprecated.", - "default": "None" - }, - { - "name": "label", - "annotation": "str | None", - "doc": "component name in interface.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", - "default": "None" - }, - { - "name": "show_label", - "annotation": "bool | None", - "doc": "if True, will display label.", - "default": "None" - }, - { - "name": "container", - "annotation": "bool", - "doc": "If True, will place the component in a container - providing some extra padding around the border.", - "default": "True" - }, - { - "name": "scale", - "annotation": "int | None", - "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", - "default": "None" - }, - { - "name": "min_width", - "annotation": "int", - "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", - "default": "160" - }, - { - "name": "visible", - "annotation": "bool", - "doc": "If False, component will be hidden.", - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "elem_classes", - "annotation": "list[str] | str | None", - "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. 
Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "height", - "annotation": "int | None", - "doc": "height of the component in pixels.", - "default": "None" - }, - { - "name": "latex_delimiters", - "annotation": "list[dict[str, str | bool]] | None", - "doc": "A list of dicts of the form {\"left\": open delimiter (str), \"right\": close delimiter (str), \"display\": whether to display in newline (bool)} that will be used to render LaTeX expressions. If not provided, `latex_delimiters` is set to `[{ \"left\": \"$$\", \"right\": \"$$\", \"display\": True }]`, so only expressions enclosed in $$ delimiters will be rendered as LaTeX, and in a new line. Pass in an empty list to disable LaTeX rendering. For more information, see the [KaTeX documentation](https://katex.org/docs/autorender.html).", - "default": "None" - }, - { - "name": "rtl", - "annotation": "bool", - "doc": "If True, sets the direction of the rendered text to right-to-left. Default is False, which renders text left-to-right.", - "default": "False" - }, - { - "name": "show_share_button", - "annotation": "bool | None", - "doc": "If True, will show a share icon in the corner of the component that allows user to share outputs to Hugging Face Spaces Discussions. If False, icon does not appear. If set to None (default behavior), then the icon appears if this Gradio app is launched on Spaces, but not otherwise.", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [ - { - "fn": null, - "name": "change", - "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Chatbot" - }, - { - "fn": null, - "name": "select", - "description": "Event listener for when the user selects message from Chatbot. Uses event data gradio.SelectData to carry `value` referring to text of selected message, and `index` tuple to refer to [message, participant] index. See EventData documentation on how to use this event data.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." 
- }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Chatbot" - } - ], - "string_shortcuts": [["Chatbot", "chatbot", "Uses default values"]], - "demos": [ - [ - "chatbot_simple", - "import gradio as gr\nimport random\nimport time\n\nwith gr.Blocks() as demo:\n chatbot = gr.Chatbot()\n msg = gr.Textbox()\n clear = gr.ClearButton([msg, chatbot])\n\n def respond(message, chat_history):\n bot_message = random.choice([\"How are you?\", \"I love you\", \"I'm very hungry\"])\n chat_history.append((message, bot_message))\n time.sleep(2)\n return \"\", chat_history\n\n msg.submit(respond, [msg, chatbot], [msg, chatbot])\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ], - [ - "chatbot_multimodal", - "import gradio as gr\nimport random\nimport time\n\n# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.\n\ndef add_text(history, text):\n history = history + [(text, None)]\n return history, gr.update(value=\"\", interactive=False)\n\n\ndef add_file(history, file):\n history = history + [((file.name,), None)]\n return history\n\n\ndef bot(history):\n response = \"**That's cool!**\"\n history[-1][1] = \"\"\n for character in response:\n history[-1][1] += character\n time.sleep(0.05)\n yield history\n\n\nwith gr.Blocks() as demo:\n chatbot = gr.Chatbot([], elem_id=\"chatbot\").style(height=750)\n\n with gr.Row():\n with gr.Column(scale=0.85):\n txt = gr.Textbox(\n show_label=False,\n placeholder=\"Enter text and press enter, or upload an image\",\n ).style(container=False)\n with gr.Column(scale=0.15, min_width=0):\n btn = gr.UploadButton(\"\ud83d\udcc1\", file_types=[\"image\", \"video\", \"audio\"])\n\n txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(\n bot, chatbot, chatbot\n )\n txt_msg.then(lambda: gr.update(interactive=True), None, [txt], queue=False)\n file_msg = btn.upload(add_file, [chatbot, btn], [chatbot], queue=False).then(\n bot, chatbot, chatbot\n )\n\ndemo.queue()\nif __name__ == \"__main__\":\n demo.launch()\n" - ] - ], - "guides": [], - "preprocessing": "passes the messages in the Chatbot as a List[List[str | None | Tuple]], i.e. a list of lists. The inner list has 2 elements: the user message and the response message. See `Postprocessing` for the format of these messages.", - "postprocessing": "expects function to return a List[List[str | None | Tuple]], i.e. a list of lists. The inner list should have 2 elements: the user message and the response message. The individual messages can be (1) strings in valid Markdown, (2) tuples if sending files: (a filepath or URL to a file, [optional string alt text]) -- if the file is image/video/audio, it is displayed in the Chatbot, or (3) None, in which case the message is not displayed.", - "parent": "gradio", - "prev_obj": "Button", - "next_obj": "Checkbox" - }, - "checkbox": { - "class": null, - "name": "Checkbox", - "description": "Creates a checkbox that can be set to `True` or `False`.
", - "tags": { - "preprocessing": "passes the status of the checkbox as a {bool} into the function.", - "postprocessing": "expects a {bool} returned from the function and, if it is True, checks the checkbox.", - "examples-format": "a {bool} representing whether the box is checked.", - "demos": "sentence_builder, titanic_survival" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "value", - "annotation": "bool | Callable", - "doc": "if True, checked by default. If callable, the function will be called whenever the app loads to set the initial value of the component.", - "default": "False" - }, - { - "name": "label", - "annotation": "str | None", - "doc": "component name in interface.", - "default": "None" - }, - { - "name": "info", - "annotation": "str | None", - "doc": "additional component description.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", - "default": "None" - }, - { - "name": "show_label", - "annotation": "bool | None", - "doc": "if True, will display label.", - "default": "None" - }, - { - "name": "container", - "annotation": "bool", - "doc": "If True, will place the component in a container - providing some extra padding around the border.", - "default": "True" - }, - { - "name": "scale", - "annotation": "int | None", - "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", - "default": "None" - }, - { - "name": "min_width", - "annotation": "int", - "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", - "default": "160" - }, - { - "name": "interactive", - "annotation": "bool | None", - "doc": "if True, this checkbox can be checked; if False, checking will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.", - "default": "None" - }, - { - "name": "visible", - "annotation": "bool", - "doc": "If False, component will be hidden.", - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "elem_classes", - "annotation": "list[str] | str | None", - "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [ - { - "fn": null, - "name": "change", - "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. 
This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. 
For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Checkbox" - }, - { - "fn": null, - "name": "input", - "description": "This listener is triggered when the user changes the value of the component. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Checkbox" - }, - { - "fn": null, - "name": "select", - "description": "Event listener for when the user selects or deselects Checkbox. Uses event data gradio.SelectData to carry `value` referring to label of checkbox, and `selected` to refer to state of checkbox. See EventData documentation on how to use this event data.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Checkbox" - } - ], - "string_shortcuts": [["Checkbox", "checkbox", "Uses default values"]], - "demos": [ - [ - "sentence_builder", - "import gradio as gr\n\n\ndef sentence_builder(quantity, animal, countries, place, activity_list, morning):\n return f\"\"\"The {quantity} {animal}s from {\" and \".join(countries)} went to the {place} where they {\" and \".join(activity_list)} until the {\"morning\" if morning else \"night\"}\"\"\"\n\n\ndemo = gr.Interface(\n sentence_builder,\n [\n gr.Slider(2, 20, value=4, label=\"Count\", info=\"Choose between 2 and 20\"),\n gr.Dropdown(\n [\"cat\", \"dog\", \"bird\"], label=\"Animal\", info=\"Will add more animals later!\"\n ),\n gr.CheckboxGroup([\"USA\", \"Japan\", \"Pakistan\"], label=\"Countries\", info=\"Where are they from?\"),\n gr.Radio([\"park\", \"zoo\", \"road\"], label=\"Location\", info=\"Where did they go?\"),\n gr.Dropdown(\n [\"ran\", \"swam\", \"ate\", \"slept\"], value=[\"swam\", \"slept\"], multiselect=True, label=\"Activity\", info=\"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed auctor, nisl eget ultricies aliquam, nunc nisl aliquet nunc, eget aliquam nisl nunc vel nisl.\"\n ),\n gr.Checkbox(label=\"Morning\", info=\"Did they do it in the morning?\"),\n ],\n \"text\",\n examples=[\n [2, \"cat\", [\"Japan\", \"Pakistan\"], \"park\", [\"ate\", \"swam\"], True],\n [4, \"dog\", [\"Japan\"], \"zoo\", [\"ate\", \"swam\"], False],\n [10, \"bird\", [\"USA\", \"Pakistan\"], \"road\", [\"ran\"], False],\n [8, \"cat\", [\"Pakistan\"], \"zoo\", [\"ate\"], True],\n ]\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ], - [ - "titanic_survival", - "import os\n\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\n\nimport gradio as gr\n\ncurrent_dir = os.path.dirname(os.path.realpath(__file__))\ndata = pd.read_csv(os.path.join(current_dir, \"files/titanic.csv\"))\n\n\ndef encode_age(df):\n df.Age = df.Age.fillna(-0.5)\n bins = (-1, 0, 5, 12, 18, 25, 35, 60, 120)\n categories = pd.cut(df.Age, bins, labels=False)\n df.Age = categories\n return df\n\n\ndef encode_fare(df):\n df.Fare = df.Fare.fillna(-0.5)\n bins = (-1, 0, 8, 15, 31, 1000)\n categories = pd.cut(df.Fare, bins, labels=False)\n df.Fare = categories\n return df\n\n\ndef encode_df(df):\n df = encode_age(df)\n df = encode_fare(df)\n sex_mapping = {\"male\": 0, \"female\": 1}\n df = df.replace({\"Sex\": sex_mapping})\n embark_mapping = {\"S\": 1, \"C\": 2, \"Q\": 3}\n df = df.replace({\"Embarked\": embark_mapping})\n df.Embarked = df.Embarked.fillna(0)\n df[\"Company\"] = 0\n df.loc[(df[\"SibSp\"] > 0), \"Company\"] = 1\n df.loc[(df[\"Parch\"] > 0), \"Company\"] = 2\n df.loc[(df[\"SibSp\"] > 0) & (df[\"Parch\"] > 0), \"Company\"] = 3\n df = df[\n [\n \"PassengerId\",\n \"Pclass\",\n \"Sex\",\n \"Age\",\n \"Fare\",\n \"Embarked\",\n \"Company\",\n \"Survived\",\n ]\n ]\n return df\n\n\ntrain = encode_df(data)\n\nX_all = train.drop([\"Survived\", \"PassengerId\"], axis=1)\ny_all = train[\"Survived\"]\n\nnum_test = 0.20\nX_train, X_test, y_train, y_test = train_test_split(\n X_all, y_all, test_size=num_test, random_state=23\n)\n\nclf = RandomForestClassifier()\nclf.fit(X_train, y_train)\npredictions = clf.predict(X_test)\n\n\ndef predict_survival(passenger_class, is_male, age, company, fare, embark_point):\n if passenger_class is None or embark_point is None:\n 
return None\n df = pd.DataFrame.from_dict(\n {\n \"Pclass\": [passenger_class + 1],\n \"Sex\": [0 if is_male else 1],\n \"Age\": [age],\n \"Fare\": [fare],\n \"Embarked\": [embark_point + 1],\n \"Company\": [\n (1 if \"Sibling\" in company else 0) + (2 if \"Child\" in company else 0)\n ]\n }\n )\n df = encode_age(df)\n df = encode_fare(df)\n pred = clf.predict_proba(df)[0]\n return {\"Perishes\": float(pred[0]), \"Survives\": float(pred[1])}\n\n\ndemo = gr.Interface(\n predict_survival,\n [\n gr.Dropdown([\"first\", \"second\", \"third\"], type=\"index\"),\n \"checkbox\",\n gr.Slider(0, 80, value=25),\n gr.CheckboxGroup([\"Sibling\", \"Child\"], label=\"Travelling with (select all)\"),\n gr.Number(value=20),\n gr.Radio([\"S\", \"C\", \"Q\"], type=\"index\"),\n ],\n \"label\",\n examples=[\n [\"first\", True, 30, [], 50, \"S\"],\n [\"second\", False, 40, [\"Sibling\", \"Child\"], 10, \"Q\"],\n [\"third\", True, 30, [\"Child\"], 20, \"S\"],\n ],\n interpretation=\"default\",\n live=True,\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ] - ], - "preprocessing": "passes the status of the checkbox as a bool into the function.", - "postprocessing": "expects a bool returned from the function and, if it is True, checks the checkbox.", - "examples-format": "a bool representing whether the box is checked.", - "parent": "gradio", - "prev_obj": "Chatbot", - "next_obj": "CheckboxGroup" - }, - "checkboxgroup": { - "class": null, - "name": "CheckboxGroup", - "description": "Creates a set of checkboxes of which a subset can be checked.", - "tags": { - "preprocessing": "passes the list of checked checkboxes as a {List[str]} or their indices as a {List[int]} into the function, depending on `type`.", - "postprocessing": "expects a {List[str]}, each element of which becomes a checked checkbox.", - "examples-format": "a {List[str]} representing the values to be checked.", - "demos": "sentence_builder, titanic_survival" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "choices", - "annotation": "list[str] | None", - "doc": "list of options to select from.", - "default": "None" - }, - { - "name": "value", - "annotation": "list[str] | str | Callable | None", - "doc": "default selected list of options. If callable, the function will be called whenever the app loads to set the initial value of the component.", - "default": "None" - }, - { - "name": "type", - "annotation": "Literal['value', 'index']", - "doc": "Type of value to be returned by component. \"value\" returns the list of strings of the choices selected, \"index\" returns the list of indices of the choices selected.", - "default": "\"value\"" - }, - { - "name": "label", - "annotation": "str | None", - "doc": "component name in interface.", - "default": "None" - }, - { - "name": "info", - "annotation": "str | None", - "doc": "additional component description.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. 
to cancel it) via this component's .load_event attribute.", - "default": "None" - }, - { - "name": "show_label", - "annotation": "bool | None", - "doc": "if True, will display label.", - "default": "None" - }, - { - "name": "container", - "annotation": "bool", - "doc": "If True, will place the component in a container - providing some extra padding around the border.", - "default": "True" - }, - { - "name": "scale", - "annotation": "int | None", - "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", - "default": "None" - }, - { - "name": "min_width", - "annotation": "int", - "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", - "default": "160" - }, - { - "name": "interactive", - "annotation": "bool | None", - "doc": "if True, choices in this checkbox group will be checkable; if False, checking will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.", - "default": "None" - }, - { - "name": "visible", - "annotation": "bool", - "doc": "If False, component will be hidden.", - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "elem_classes", - "annotation": "list[str] | str | None", - "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [ - { - "fn": null, - "name": "change", - "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. 
If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.CheckboxGroup" - }, - { - "fn": null, - "name": "input", - "description": "This listener is triggered when the user changes the value of the component. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." 
- }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.CheckboxGroup" - }, - { - "fn": null, - "name": "select", - "description": "Event listener for when the user selects or deselects within CheckboxGroup. Uses event data gradio.SelectData to carry `value` referring to label of selected checkbox, `index` to refer to index, and `selected` to refer to state of checkbox. See EventData documentation on how to use this event data.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.CheckboxGroup" - } - ], - "string_shortcuts": [ - ["CheckboxGroup", "checkboxgroup", "Uses default values"] - ], - "demos": [ - [ - "sentence_builder", - "import gradio as gr\n\n\ndef sentence_builder(quantity, animal, countries, place, activity_list, morning):\n return f\"\"\"The {quantity} {animal}s from {\" and \".join(countries)} went to the {place} where they {\" and \".join(activity_list)} until the {\"morning\" if morning else \"night\"}\"\"\"\n\n\ndemo = gr.Interface(\n sentence_builder,\n [\n gr.Slider(2, 20, value=4, label=\"Count\", info=\"Choose between 2 and 20\"),\n gr.Dropdown(\n [\"cat\", \"dog\", \"bird\"], label=\"Animal\", info=\"Will add more animals later!\"\n ),\n gr.CheckboxGroup([\"USA\", \"Japan\", \"Pakistan\"], label=\"Countries\", info=\"Where are they from?\"),\n gr.Radio([\"park\", \"zoo\", \"road\"], label=\"Location\", info=\"Where did they go?\"),\n gr.Dropdown(\n [\"ran\", \"swam\", \"ate\", \"slept\"], value=[\"swam\", \"slept\"], multiselect=True, label=\"Activity\", info=\"Lorem ipsum dolor sit amet, consectetur adipiscing elit. 
Sed auctor, nisl eget ultricies aliquam, nunc nisl aliquet nunc, eget aliquam nisl nunc vel nisl.\"\n ),\n gr.Checkbox(label=\"Morning\", info=\"Did they do it in the morning?\"),\n ],\n \"text\",\n examples=[\n [2, \"cat\", [\"Japan\", \"Pakistan\"], \"park\", [\"ate\", \"swam\"], True],\n [4, \"dog\", [\"Japan\"], \"zoo\", [\"ate\", \"swam\"], False],\n [10, \"bird\", [\"USA\", \"Pakistan\"], \"road\", [\"ran\"], False],\n [8, \"cat\", [\"Pakistan\"], \"zoo\", [\"ate\"], True],\n ]\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ], - [ - "titanic_survival", - "import os\n\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\n\nimport gradio as gr\n\ncurrent_dir = os.path.dirname(os.path.realpath(__file__))\ndata = pd.read_csv(os.path.join(current_dir, \"files/titanic.csv\"))\n\n\ndef encode_age(df):\n df.Age = df.Age.fillna(-0.5)\n bins = (-1, 0, 5, 12, 18, 25, 35, 60, 120)\n categories = pd.cut(df.Age, bins, labels=False)\n df.Age = categories\n return df\n\n\ndef encode_fare(df):\n df.Fare = df.Fare.fillna(-0.5)\n bins = (-1, 0, 8, 15, 31, 1000)\n categories = pd.cut(df.Fare, bins, labels=False)\n df.Fare = categories\n return df\n\n\ndef encode_df(df):\n df = encode_age(df)\n df = encode_fare(df)\n sex_mapping = {\"male\": 0, \"female\": 1}\n df = df.replace({\"Sex\": sex_mapping})\n embark_mapping = {\"S\": 1, \"C\": 2, \"Q\": 3}\n df = df.replace({\"Embarked\": embark_mapping})\n df.Embarked = df.Embarked.fillna(0)\n df[\"Company\"] = 0\n df.loc[(df[\"SibSp\"] > 0), \"Company\"] = 1\n df.loc[(df[\"Parch\"] > 0), \"Company\"] = 2\n df.loc[(df[\"SibSp\"] > 0) & (df[\"Parch\"] > 0), \"Company\"] = 3\n df = df[\n [\n \"PassengerId\",\n \"Pclass\",\n \"Sex\",\n \"Age\",\n \"Fare\",\n \"Embarked\",\n \"Company\",\n \"Survived\",\n ]\n ]\n return df\n\n\ntrain = encode_df(data)\n\nX_all = train.drop([\"Survived\", \"PassengerId\"], axis=1)\ny_all = train[\"Survived\"]\n\nnum_test = 0.20\nX_train, X_test, y_train, y_test = train_test_split(\n X_all, y_all, test_size=num_test, random_state=23\n)\n\nclf = RandomForestClassifier()\nclf.fit(X_train, y_train)\npredictions = clf.predict(X_test)\n\n\ndef predict_survival(passenger_class, is_male, age, company, fare, embark_point):\n if passenger_class is None or embark_point is None:\n return None\n df = pd.DataFrame.from_dict(\n {\n \"Pclass\": [passenger_class + 1],\n \"Sex\": [0 if is_male else 1],\n \"Age\": [age],\n \"Fare\": [fare],\n \"Embarked\": [embark_point + 1],\n \"Company\": [\n (1 if \"Sibling\" in company else 0) + (2 if \"Child\" in company else 0)\n ]\n }\n )\n df = encode_age(df)\n df = encode_fare(df)\n pred = clf.predict_proba(df)[0]\n return {\"Perishes\": float(pred[0]), \"Survives\": float(pred[1])}\n\n\ndemo = gr.Interface(\n predict_survival,\n [\n gr.Dropdown([\"first\", \"second\", \"third\"], type=\"index\"),\n \"checkbox\",\n gr.Slider(0, 80, value=25),\n gr.CheckboxGroup([\"Sibling\", \"Child\"], label=\"Travelling with (select all)\"),\n gr.Number(value=20),\n gr.Radio([\"S\", \"C\", \"Q\"], type=\"index\"),\n ],\n \"label\",\n examples=[\n [\"first\", True, 30, [], 50, \"S\"],\n [\"second\", False, 40, [\"Sibling\", \"Child\"], 10, \"Q\"],\n [\"third\", True, 30, [\"Child\"], 20, \"S\"],\n ],\n interpretation=\"default\",\n live=True,\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ] - ], - "preprocessing": "passes the list of checked checkboxes as a List[str] or their indices as a List[int] into the function, depending on 
`type`.", - "postprocessing": "expects a List[str], each element of which becomes a checked checkbox.", - "examples-format": "a List[str] representing the values to be checked.", - "parent": "gradio", - "prev_obj": "Checkbox", - "next_obj": "ClearButton" - }, - "clearbutton": { - "class": null, - "name": "ClearButton", - "description": "Button that clears the value of a component or a list of components when clicked. It is instantiated with the list of components to clear.", - "tags": { - "preprocessing": "passes the button value as a {str} into the function", - "postprocessing": "expects a {str} to be returned from a function, which is set as the label of the button" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "components", - "annotation": "None | list[Component] | Component", - "doc": null, - "default": "None" - }, - { - "name": "value", - "annotation": "str", - "doc": "Default text for the button to display. If callable, the function will be called whenever the app loads to set the initial value of the component.", - "default": "\"Clear\"" - }, - { - "name": "variant", - "annotation": "Literal['primary', 'secondary', 'stop']", - "doc": "'primary' for main call-to-action, 'secondary' for a more subdued style, 'stop' for a stop button.", - "default": "\"secondary\"" - }, - { - "name": "size", - "annotation": "Literal['sm', 'lg'] | None", - "doc": "Size of the button. Can be \"sm\" or \"lg\".", - "default": "None" - }, - { - "name": "visible", - "annotation": "bool", - "doc": "If False, component will be hidden.", - "default": "True" - }, - { - "name": "interactive", - "annotation": "bool", - "doc": "If False, the Button will be in a disabled state.", - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "elem_classes", - "annotation": "list[str] | str | None", - "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "scale", - "annotation": "int | None", - "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", - "default": "None" - }, - { - "name": "min_width", - "annotation": "int | None", - "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [ - { - "fn": null, - "name": "add", - "description": "Adds a component or list of components to the list of components that will be cleared when the button is clicked.", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "components", - "annotation": "None | Component | list[Component]", - "doc": null - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.ClearButton" - }, - { - "fn": null, - "name": "click", - "description": "This listener is triggered when the component (e.g. a button) is clicked. 
This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. 
For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.ClearButton" - } - ], - "string_shortcuts": [ - ["ClearButton", "clearbutton", "Uses default values"] - ], - "preprocessing": "passes the button value as a str into the function", - "postprocessing": "expects a str to be returned from a function, which is set as the label of the button", - "parent": "gradio", - "prev_obj": "CheckboxGroup", - "next_obj": "Code" - }, - "code": { - "class": null, - "name": "Code", - "description": "Creates a Code editor for entering, editing or viewing code.", - "tags": { - "preprocessing": "passes a {str} of code into the function.", - "postprocessing": "expects the function to return a {str} of code or a single-element {tuple}: (string filepath,)" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "value", - "annotation": "str | tuple[str] | None", - "doc": "Default value to show in the code editor. If callable, the function will be called whenever the app loads to set the initial value of the component.", - "default": "None" - }, - { - "name": "language", - "annotation": "Literal['python', 'markdown', 'json', 'html', 'css', 'javascript', 'typescript', 'yaml', 'dockerfile', 'shell', 'r'] | None", - "doc": "The language to display the code as. Supported languages listed in `gr.Code.languages`.", - "default": "None" - }, - { "name": "lines", "annotation": "int", "doc": null, "default": "5" }, - { - "name": "label", - "annotation": "str | None", - "doc": "component name in interface.", - "default": "None" - }, - { - "name": "interactive", - "annotation": "bool | None", - "doc": "Whether user should be able to enter code or only view it.", - "default": "None" - }, - { - "name": "show_label", - "annotation": "bool | None", - "doc": "if True, will display label.", - "default": "None" - }, - { - "name": "container", - "annotation": "bool", - "doc": "If True, will place the component in a container - providing some extra padding around the border.", - "default": "True" - }, - { - "name": "scale", - "annotation": "int | None", - "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", - "default": "None" - }, - { - "name": "min_width", - "annotation": "int", - "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", - "default": "160" - }, - { - "name": "visible", - "annotation": "bool", - "doc": "If False, component will be hidden.", - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. 
Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "elem_classes", - "annotation": "list[str] | str | None", - "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [ - { - "fn": null, - "name": "languages", - "description": "['python', 'markdown', 'json', 'html', 'css', 'javascript', 'typescript', 'yaml', 'dockerfile', 'shell', 'r', None]", - "tags": {}, - "parameters": {}, - "returns": {}, - "example": "", - "override_signature": "gr.Code.languages", - "parent": "gradio.Code" - }, - { - "fn": null, - "name": "change", - "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Code" - }, - { - "fn": null, - "name": "input", - "description": "This listener is triggered when the user changes the value of the component. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Code" - } - ], - "string_shortcuts": [["Code", "code", "Uses default values"]], - "preprocessing": "passes a str of code into the function.", - "postprocessing": "expects the function to return a str of code or a single-element tuple: (string filepath,)", - "parent": "gradio", - "prev_obj": "ClearButton", - "next_obj": "ColorPicker" - }, - "colorpicker": { - "class": null, - "name": "ColorPicker", - "description": "Creates a color picker for user to select a color as string input.", - "tags": { - "preprocessing": "passes selected color value as a {str} into the function.", - "postprocessing": "expects a {str} returned from function and sets color picker value to it.", - "examples-format": "a {str} with a hexadecimal representation of a color, e.g. 
\"#ff0000\" for red.", - "demos": "color_picker, color_generator" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "value", - "annotation": "str | Callable | None", - "doc": "default text to provide in color picker. If callable, the function will be called whenever the app loads to set the initial value of the component.", - "default": "None" - }, - { - "name": "label", - "annotation": "str | None", - "doc": "component name in interface.", - "default": "None" - }, - { - "name": "info", - "annotation": "str | None", - "doc": "additional component description.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", - "default": "None" - }, - { - "name": "show_label", - "annotation": "bool | None", - "doc": "if True, will display label.", - "default": "None" - }, - { - "name": "container", - "annotation": "bool", - "doc": "If True, will place the component in a container - providing some extra padding around the border.", - "default": "True" - }, - { - "name": "scale", - "annotation": "int | None", - "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", - "default": "None" - }, - { - "name": "min_width", - "annotation": "int", - "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", - "default": "160" - }, - { - "name": "interactive", - "annotation": "bool | None", - "doc": "if True, will be rendered as an editable color picker; if False, editing will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.", - "default": "None" - }, - { - "name": "visible", - "annotation": "bool", - "doc": "If False, component will be hidden.", - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "elem_classes", - "annotation": "list[str] | str | None", - "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [ - { - "fn": null, - "name": "change", - "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. 
Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. 
Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.ColorPicker" - }, - { - "fn": null, - "name": "input", - "description": "This listener is triggered when the user changes the value of the component. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.ColorPicker" - }, - { - "fn": null, - "name": "submit", - "description": "This listener is triggered when the user presses the Enter key while the component (e.g. a textbox) is focused. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.ColorPicker" - }, - { - "fn": null, - "name": "focus", - "description": "This listener is triggered when the component is focused (e.g. when the user clicks inside a textbox). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. 
If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.ColorPicker" - }, - { - "fn": null, - "name": "blur", - "description": "This listener is triggered when the component's is unfocused/blurred (e.g. when the user clicks outside of a textbox). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.ColorPicker" - } - ], - "string_shortcuts": [ - ["ColorPicker", "colorpicker", "Uses default values"] - ], - "demos": [ - [ - "color_picker", - "import gradio as gr\nimport numpy as np\nimport os\nfrom PIL import Image, ImageColor\n\n\ndef change_color(icon, color):\n\n \"\"\"\n Function that given an icon in .png format changes its color\n Args:\n icon: Icon whose color needs to be changed.\n color: Chosen color with which to edit the input icon.\n Returns:\n edited_image: Edited icon.\n \"\"\"\n img = icon.convert(\"LA\")\n img = img.convert(\"RGBA\")\n image_np = np.array(icon)\n _, _, _, alpha = image_np.T\n mask = alpha > 0\n image_np[..., :-1][mask.T] = ImageColor.getcolor(color, \"RGB\")\n edited_image = Image.fromarray(image_np)\n return edited_image\n\n\ninputs = [\n gr.Image(label=\"icon\", type=\"pil\", image_mode=\"RGBA\"),\n gr.ColorPicker(label=\"color\"),\n]\noutputs = gr.Image(label=\"colored icon\")\n\ndemo = gr.Interface(\n fn=change_color,\n inputs=inputs,\n outputs=outputs,\n examples=[\n [os.path.join(os.path.dirname(__file__), \"rabbit.png\"), \"#ff0000\"],\n [os.path.join(os.path.dirname(__file__), \"rabbit.png\"), \"#0000FF\"],\n ],\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ], - [ - "color_generator", - "import gradio as gr\nimport cv2\nimport numpy as np\nimport random\n\n\n# Convert decimal color to hexadecimal color\ndef RGB_to_Hex(rgb):\n color = \"#\"\n for i in rgb:\n num = int(i)\n color += str(hex(num))[-2:].replace(\"x\", \"0\").upper()\n return color\n\n\n# Randomly generate light or dark colors\ndef random_color(is_light=True):\n return (\n random.randint(0, 127) + int(is_light) * 128,\n random.randint(0, 127) + int(is_light) * 128,\n random.randint(0, 127) + int(is_light) * 128,\n )\n\n\ndef switch_color(color_style):\n if color_style == \"light\":\n is_light = True\n elif color_style == \"dark\":\n is_light = False\n back_color_ = random_color(is_light) # Randomly generate colors\n back_color = RGB_to_Hex(back_color_) # Convert to hexadecimal\n\n # Draw color pictures.\n w, h = 50, 50\n img = np.zeros((h, w, 3), np.uint8)\n cv2.rectangle(img, (0, 0), (w, h), back_color_, thickness=-1)\n\n return back_color, back_color, img\n\n\ninputs = [gr.Radio([\"light\", \"dark\"], value=\"light\")]\n\noutputs = [\n gr.ColorPicker(label=\"color\"),\n gr.Textbox(label=\"hexadecimal color\"),\n gr.Image(type=\"numpy\", label=\"color picture\"),\n]\n\ntitle = \"Color 
Generator\"\ndescription = (\n \"Click the Submit button, and a dark or light color will be randomly generated.\"\n)\n\ndemo = gr.Interface(\n fn=switch_color,\n inputs=inputs,\n outputs=outputs,\n title=title,\n description=description,\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ] - ], - "preprocessing": "passes selected color value as a str into the function.", - "postprocessing": "expects a str returned from function and sets color picker value to it.", - "examples-format": "a str with a hexadecimal representation of a color, e.g. \"#ff0000\" for red.", - "parent": "gradio", - "prev_obj": "Code", - "next_obj": "Dataframe" - }, - "dataframe": { - "class": null, - "name": "Dataframe", - "description": "Accepts or displays 2D input through a spreadsheet-like component for dataframes.", - "tags": { - "preprocessing": "passes the uploaded spreadsheet data as a {pandas.DataFrame}, {numpy.array}, {List[List]}, or {List} depending on `type`", - "postprocessing": "expects a {pandas.DataFrame}, {numpy.array}, {List[List]}, {List}, a {Dict} with keys `data` (and optionally `headers`), or {str} path to a csv, which is rendered in the spreadsheet.", - "examples-format": "a {str} filepath to a csv with data, a pandas dataframe, or a list of lists (excluding headers) where each sublist is a row of data.", - "demos": "filter_records, matrix_transpose, tax_calculator" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "value", - "annotation": "list[list[Any]] | Callable | None", - "doc": "Default value as a 2-dimensional list of values. If callable, the function will be called whenever the app loads to set the initial value of the component.", - "default": "None" - }, - { - "name": "headers", - "annotation": "list[str] | None", - "doc": "List of str header names. If None, no headers are shown.", - "default": "None" - }, - { - "name": "row_count", - "annotation": "int | tuple[int, str]", - "doc": "Limit number of rows for input and decide whether user can create new rows. The first element of the tuple is an `int`, the row count; the second should be 'fixed' or 'dynamic', the new row behaviour. If an `int` is passed the rows default to 'dynamic'", - "default": "(1, 'dynamic')" - }, - { - "name": "col_count", - "annotation": "int | tuple[int, str] | None", - "doc": "Limit number of columns for input and decide whether user can create new columns. The first element of the tuple is an `int`, the number of columns; the second should be 'fixed' or 'dynamic', the new column behaviour. If an `int` is passed the columns default to 'dynamic'", - "default": "None" - }, - { - "name": "datatype", - "annotation": "str | list[str]", - "doc": "Datatype of values in sheet. Can be provided per column as a list of strings, or for the entire sheet as a single string. Valid datatypes are \"str\", \"number\", \"bool\", \"date\", and \"markdown\".", - "default": "\"str\"" - }, - { - "name": "type", - "annotation": "Literal['pandas', 'numpy', 'array']", - "doc": "Type of value to be returned by component. \"pandas\" for pandas dataframe, \"numpy\" for numpy array, or \"array\" for a Python array.", - "default": "\"pandas\"" - }, - { - "name": "max_rows", - "annotation": "int | None", - "doc": "Maximum number of rows to display at once. Set to None for infinite.", - "default": "20" - }, - { - "name": "max_cols", - "annotation": "int | None", - "doc": "Maximum number of columns to display at once. 
Set to None for infinite.", - "default": "None" - }, - { - "name": "overflow_row_behaviour", - "annotation": "Literal['paginate', 'show_ends']", - "doc": "If set to \"paginate\", will create pages for overflow rows. If set to \"show_ends\", will show initial and final rows and truncate middle rows.", - "default": "\"paginate\"" - }, - { - "name": "label", - "annotation": "str | None", - "doc": "component name in interface.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", - "default": "None" - }, - { - "name": "show_label", - "annotation": "bool | None", - "doc": "if True, will display label.", - "default": "None" - }, - { - "name": "scale", - "annotation": "int | None", - "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", - "default": "None" - }, - { - "name": "min_width", - "annotation": "int", - "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", - "default": "160" - }, - { - "name": "interactive", - "annotation": "bool | None", - "doc": "if True, will allow users to edit the dataframe; if False, can only be used to display data. If not provided, this is inferred based on whether the component is used as an input or output.", - "default": "None" - }, - { - "name": "visible", - "annotation": "bool", - "doc": "If False, component will be hidden.", - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "elem_classes", - "annotation": "list[str] | str | None", - "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "wrap", - "annotation": "bool", - "doc": "if True text in table cells will wrap when appropriate, if False the table will scroll horizontally. Defaults to False.", - "default": "False" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [ - { - "fn": null, - "name": "change", - "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." 
- }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Dataframe" - }, - { - "fn": null, - "name": "input", - "description": "This listener is triggered when the user changes the value of the component. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Dataframe" - }, - { - "fn": null, - "name": "select", - "description": "Event listener for when the user selects cell within Dataframe. Uses event data gradio.SelectData to carry `value` referring to value of selected cell, and `index` tuple to refer to index row and column. See EventData documentation on how to use this event data.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. 
If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Dataframe" - } - ], - "string_shortcuts": [ - ["Dataframe", "dataframe", "Uses default values"], - ["Numpy", "numpy", "Uses type=\"numpy\""], - ["Matrix", "matrix", "Uses type=\"array\""], - ["List", "list", "Uses type=\"array\", col_count=1"] - ], - "demos": [ - [ - "filter_records", - "import gradio as gr\n\n\ndef filter_records(records, gender):\n return records[records[\"gender\"] == gender]\n\n\ndemo = gr.Interface(\n filter_records,\n [\n gr.Dataframe(\n headers=[\"name\", \"age\", \"gender\"],\n datatype=[\"str\", \"number\", \"str\"],\n row_count=5,\n col_count=(3, \"fixed\"),\n ),\n gr.Dropdown([\"M\", \"F\", \"O\"]),\n ],\n \"dataframe\",\n description=\"Enter gender as 'M', 'F', or 'O' for other.\",\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ], - [ - "matrix_transpose", - "import numpy as np\n\nimport gradio as gr\n\n\ndef transpose(matrix):\n return matrix.T\n\n\ndemo = gr.Interface(\n transpose,\n gr.Dataframe(type=\"numpy\", datatype=\"number\", row_count=5, col_count=3),\n \"numpy\",\n examples=[\n [np.zeros((3, 3)).tolist()],\n [np.ones((2, 2)).tolist()],\n [np.random.randint(0, 10, (3, 10)).tolist()],\n [np.random.randint(0, 10, (10, 3)).tolist()],\n [np.random.randint(0, 10, (10, 10)).tolist()],\n ],\n cache_examples=False\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ], - [ - "tax_calculator", - "import gradio as gr\n\ndef tax_calculator(income, marital_status, assets):\n tax_brackets = [(10, 0), (25, 8), (60, 12), (120, 20), (250, 30)]\n total_deductible = sum(assets[\"Cost\"])\n taxable_income = income - total_deductible\n\n total_tax = 0\n for bracket, rate in tax_brackets:\n if taxable_income > bracket:\n total_tax += (taxable_income - bracket) * rate / 100\n\n if marital_status == \"Married\":\n total_tax *= 0.75\n elif marital_status == \"Divorced\":\n total_tax *= 0.8\n\n return round(total_tax)\n\ndemo = gr.Interface(\n tax_calculator,\n [\n \"number\",\n gr.Radio([\"Single\", \"Married\", \"Divorced\"]),\n gr.Dataframe(\n headers=[\"Item\", \"Cost\"],\n datatype=[\"str\", \"number\"],\n label=\"Assets Purchased this Year\",\n ),\n ],\n \"number\",\n examples=[\n [10000, \"Married\", [[\"Suit\", 5000], [\"Laptop\", 800], [\"Car\", 1800]]],\n [80000, \"Single\", [[\"Suit\", 800], [\"Watch\", 1800], [\"Car\", 800]]],\n ],\n)\n\ndemo.launch()\n" - ] - ], - "preprocessing": "passes the uploaded spreadsheet data as a pandas.DataFrame, numpy.array, List[List], or List depending on `type`", - "postprocessing": "expects a pandas.DataFrame, numpy.array, List[List], List, a Dict with keys `data` (and optionally `headers`), or str path to a csv, which is rendered in the spreadsheet.", - "examples-format": "a str filepath to a csv with data, a pandas dataframe, or a list of lists (excluding headers) where each sublist is a row of data.", - "parent": "gradio", - "prev_obj": "ColorPicker", - "next_obj": "Dataset" - }, - "dataset": { - "class": null, - "name": "Dataset", - "description": "Used to create an output widget for showing datasets. Used to render the examples box.", - "tags": { - "preprocessing": "passes the selected sample either as a {list} of data (if type=\"value\") or as an {int} index (if type=\"index\")", - "postprocessing": "expects a {list} of {lists} corresponding to the dataset data." 
- }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "label", - "annotation": "str | None", - "doc": null, - "default": "None" - }, - { - "name": "components", - "annotation": "list[IOComponent] | list[str]", - "doc": "Which component types to show in this dataset widget, can be passed in as a list of string names or Components instances. The following components are supported in a Dataset: Audio, Checkbox, CheckboxGroup, ColorPicker, Dataframe, Dropdown, File, HTML, Image, Markdown, Model3D, Number, Radio, Slider, Textbox, TimeSeries, Video" - }, - { - "name": "samples", - "annotation": "list[list[Any]] | None", - "doc": "a nested list of samples. Each sublist within the outer list represents a data sample, and each element within the sublist represents an value for each component", - "default": "None" - }, - { - "name": "headers", - "annotation": "list[str] | None", - "doc": "Column headers in the Dataset widget, should be the same len as components. If not provided, inferred from component labels", - "default": "None" - }, - { - "name": "type", - "annotation": "Literal['values', 'index']", - "doc": "'values' if clicking on a sample should pass the value of the sample, or \"index\" if it should pass the index of the sample", - "default": "\"values\"" - }, - { - "name": "samples_per_page", - "annotation": "int", - "doc": "how many examples to show per page.", - "default": "10" - }, - { - "name": "visible", - "annotation": "bool", - "doc": "If False, component will be hidden.", - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "elem_classes", - "annotation": "list[str] | str | None", - "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "container", - "annotation": "bool", - "doc": "If True, will place the component in a container - providing some extra padding around the border.", - "default": "True" - }, - { - "name": "scale", - "annotation": "int | None", - "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", - "default": "None" - }, - { - "name": "min_width", - "annotation": "int", - "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", - "default": "160" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [ - { - "fn": null, - "name": "click", - "description": "This listener is triggered when the component (e.g. a button) is clicked. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." 
- }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Dataset" - }, - { - "fn": null, - "name": "select", - "description": "This listener is triggered when the user selects from within the Component. This event has EventData of type gradio.SelectData that carries information, accessible through SelectData.index and SelectData.value. See EventData documentation on how to use this event data.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Dataset" - } - ], - "string_shortcuts": [["Dataset", "dataset", "Uses default values"]], - "preprocessing": "passes the selected sample either as a list of data (if type=\"value\") or as an int index (if type=\"index\")", - "postprocessing": "expects a list of lists corresponding to the dataset data.", - "override_signature": "gr.Dataset(components, samples)", - "parent": "gradio", - "prev_obj": "Dataframe", - "next_obj": "Dropdown" - }, - "dropdown": { - "class": null, - "name": "Dropdown", - "description": "Creates a dropdown of choices from which entries can be selected.", - "tags": { - "preprocessing": "passes the value of the selected dropdown entry as a {str} or its index as an {int} into the function, depending on `type`.", - "postprocessing": "expects a {str} corresponding to the value of the dropdown entry to be selected.", - "examples-format": "a {str} representing the drop down value to select.", - "demos": "sentence_builder, titanic_survival" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "choices", - "annotation": "list[str] | None", - "doc": "list of options to select from.", - "default": "None" - }, - { - "name": "value", - "annotation": "str | list[str] | Callable | None", - "doc": "default value(s) selected in dropdown. If None, no value is selected by default. If callable, the function will be called whenever the app loads to set the initial value of the component.", - "default": "None" - }, - { - "name": "type", - "annotation": "Literal['value', 'index']", - "doc": "Type of value to be returned by component. \"value\" returns the string of the choice selected, \"index\" returns the index of the choice selected.", - "default": "\"value\"" - }, - { - "name": "multiselect", - "annotation": "bool | None", - "doc": "if True, multiple choices can be selected.", - "default": "None" - }, - { - "name": "allow_custom_value", - "annotation": "bool", - "doc": "If True, allows user to enter a custom value that is not in the list of choices. Only applies if `multiselect` is False.", - "default": "False" - }, - { - "name": "max_choices", - "annotation": "int | None", - "doc": "maximum number of choices that can be selected. 
If None, no limit is enforced.", - "default": "None" - }, - { - "name": "label", - "annotation": "str | None", - "doc": "component name in interface.", - "default": "None" - }, - { - "name": "info", - "annotation": "str | None", - "doc": "additional component description.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", - "default": "None" - }, - { - "name": "show_label", - "annotation": "bool | None", - "doc": "if True, will display label.", - "default": "None" - }, - { - "name": "container", - "annotation": "bool", - "doc": "If True, will place the component in a container - providing some extra padding around the border.", - "default": "True" - }, - { - "name": "scale", - "annotation": "int | None", - "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", - "default": "None" - }, - { - "name": "min_width", - "annotation": "int", - "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", - "default": "160" - }, - { - "name": "interactive", - "annotation": "bool | None", - "doc": "if True, choices in this dropdown will be selectable; if False, selection will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.", - "default": "None" - }, - { - "name": "visible", - "annotation": "bool", - "doc": "If False, component will be hidden.", - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "elem_classes", - "annotation": "list[str] | str | None", - "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [ - { - "fn": null, - "name": "change", - "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. 
If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Dropdown" - }, - { - "fn": null, - "name": "input", - "description": "This listener is triggered when the user changes the value of the component. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Dropdown" - }, - { - "fn": null, - "name": "focus", - "description": "This listener is triggered when the component is focused (e.g. when the user clicks inside a textbox). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. 
If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another component's .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Dropdown" - }, - { - "fn": null, - "name": "blur", - "description": "This listener is triggered when the component is unfocused/blurred (e.g. when the user clicks outside of a textbox). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Dropdown" - }, - { - "fn": null, - "name": "select", - "description": "Event listener for when the user selects Dropdown option. Uses event data gradio.SelectData to carry `value` referring to label of selected option, and `index` to refer to index. See EventData documentation on how to use this event data.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." 
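The `select` listener documented here passes a `gradio.SelectData` payload rather than a regular input value. A small sketch of how that is usually consumed (component names are illustrative):

```python
import gradio as gr


def on_select(evt: gr.SelectData):
    # Per the entry above, SelectData carries the selected option's label and index.
    return f"You picked {evt.value} (index {evt.index})"


with gr.Blocks() as demo:
    animal = gr.Dropdown(["cat", "dog", "bird"], label="Animal")
    summary = gr.Textbox(label="Selection")
    animal.select(on_select, None, summary)

if __name__ == "__main__":
    demo.launch()
```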
- }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Dropdown" - } - ], - "string_shortcuts": [["Dropdown", "dropdown", "Uses default values"]], - "demos": [ - [ - "sentence_builder", - "import gradio as gr\n\n\ndef sentence_builder(quantity, animal, countries, place, activity_list, morning):\n return f\"\"\"The {quantity} {animal}s from {\" and \".join(countries)} went to the {place} where they {\" and \".join(activity_list)} until the {\"morning\" if morning else \"night\"}\"\"\"\n\n\ndemo = gr.Interface(\n sentence_builder,\n [\n gr.Slider(2, 20, value=4, label=\"Count\", info=\"Choose between 2 and 20\"),\n gr.Dropdown(\n [\"cat\", \"dog\", \"bird\"], label=\"Animal\", info=\"Will add more animals later!\"\n ),\n gr.CheckboxGroup([\"USA\", \"Japan\", \"Pakistan\"], label=\"Countries\", info=\"Where are they from?\"),\n gr.Radio([\"park\", \"zoo\", \"road\"], label=\"Location\", info=\"Where did they go?\"),\n gr.Dropdown(\n [\"ran\", \"swam\", \"ate\", \"slept\"], value=[\"swam\", \"slept\"], multiselect=True, label=\"Activity\", info=\"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed auctor, nisl eget ultricies aliquam, nunc nisl aliquet nunc, eget aliquam nisl nunc vel nisl.\"\n ),\n gr.Checkbox(label=\"Morning\", info=\"Did they do it in the morning?\"),\n ],\n \"text\",\n examples=[\n [2, \"cat\", [\"Japan\", \"Pakistan\"], \"park\", [\"ate\", \"swam\"], True],\n [4, \"dog\", [\"Japan\"], \"zoo\", [\"ate\", \"swam\"], False],\n [10, \"bird\", [\"USA\", \"Pakistan\"], \"road\", [\"ran\"], False],\n [8, \"cat\", [\"Pakistan\"], \"zoo\", [\"ate\"], True],\n ]\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ], - [ - "titanic_survival", - "import os\n\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\n\nimport gradio as gr\n\ncurrent_dir = os.path.dirname(os.path.realpath(__file__))\ndata = pd.read_csv(os.path.join(current_dir, \"files/titanic.csv\"))\n\n\ndef encode_age(df):\n df.Age = df.Age.fillna(-0.5)\n bins = (-1, 0, 5, 12, 18, 25, 35, 60, 120)\n categories = pd.cut(df.Age, bins, labels=False)\n df.Age = categories\n return df\n\n\ndef encode_fare(df):\n df.Fare = df.Fare.fillna(-0.5)\n bins = (-1, 0, 8, 15, 31, 1000)\n categories = pd.cut(df.Fare, bins, labels=False)\n df.Fare = categories\n return df\n\n\ndef encode_df(df):\n df = encode_age(df)\n df = encode_fare(df)\n sex_mapping = {\"male\": 0, \"female\": 1}\n df = df.replace({\"Sex\": sex_mapping})\n embark_mapping = {\"S\": 1, \"C\": 2, \"Q\": 3}\n df = df.replace({\"Embarked\": embark_mapping})\n df.Embarked = df.Embarked.fillna(0)\n df[\"Company\"] = 0\n df.loc[(df[\"SibSp\"] > 0), \"Company\"] = 1\n df.loc[(df[\"Parch\"] > 0), \"Company\"] = 2\n df.loc[(df[\"SibSp\"] > 0) & (df[\"Parch\"] > 0), \"Company\"] = 3\n df = df[\n [\n \"PassengerId\",\n \"Pclass\",\n \"Sex\",\n \"Age\",\n \"Fare\",\n \"Embarked\",\n \"Company\",\n \"Survived\",\n ]\n ]\n return df\n\n\ntrain = encode_df(data)\n\nX_all = train.drop([\"Survived\", \"PassengerId\"], axis=1)\ny_all = train[\"Survived\"]\n\nnum_test = 0.20\nX_train, X_test, y_train, y_test = train_test_split(\n X_all, y_all, test_size=num_test, random_state=23\n)\n\nclf = RandomForestClassifier()\nclf.fit(X_train, y_train)\npredictions = clf.predict(X_test)\n\n\ndef predict_survival(passenger_class, is_male, age, company, fare, embark_point):\n if passenger_class is None or embark_point is None:\n 
return None\n df = pd.DataFrame.from_dict(\n {\n \"Pclass\": [passenger_class + 1],\n \"Sex\": [0 if is_male else 1],\n \"Age\": [age],\n \"Fare\": [fare],\n \"Embarked\": [embark_point + 1],\n \"Company\": [\n (1 if \"Sibling\" in company else 0) + (2 if \"Child\" in company else 0)\n ]\n }\n )\n df = encode_age(df)\n df = encode_fare(df)\n pred = clf.predict_proba(df)[0]\n return {\"Perishes\": float(pred[0]), \"Survives\": float(pred[1])}\n\n\ndemo = gr.Interface(\n predict_survival,\n [\n gr.Dropdown([\"first\", \"second\", \"third\"], type=\"index\"),\n \"checkbox\",\n gr.Slider(0, 80, value=25),\n gr.CheckboxGroup([\"Sibling\", \"Child\"], label=\"Travelling with (select all)\"),\n gr.Number(value=20),\n gr.Radio([\"S\", \"C\", \"Q\"], type=\"index\"),\n ],\n \"label\",\n examples=[\n [\"first\", True, 30, [], 50, \"S\"],\n [\"second\", False, 40, [\"Sibling\", \"Child\"], 10, \"Q\"],\n [\"third\", True, 30, [\"Child\"], 20, \"S\"],\n ],\n interpretation=\"default\",\n live=True,\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ] - ], - "preprocessing": "passes the value of the selected dropdown entry as a str or its index as an int into the function, depending on `type`.", - "postprocessing": "expects a str corresponding to the value of the dropdown entry to be selected.", - "examples-format": "a str representing the drop down value to select.", - "parent": "gradio", - "prev_obj": "Dataset", - "next_obj": "DuplicateButton" - }, - "duplicatebutton": { - "class": null, - "name": "DuplicateButton", - "description": "Button that triggers a Spaces Duplication, when the demo is on Hugging Face Spaces. Does nothing locally.", - "tags": { - "preprocessing": "passes the button value as a {str} into the function", - "postprocessing": "expects a {str} to be returned from a function, which is set as the label of the button" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "value", - "annotation": "str", - "doc": "Default text for the button to display. If callable, the function will be called whenever the app loads to set the initial value of the component.", - "default": "\"Duplicate Space\"" - }, - { - "name": "variant", - "annotation": "Literal['primary', 'secondary', 'stop']", - "doc": "'primary' for main call-to-action, 'secondary' for a more subdued style, 'stop' for a stop button.", - "default": "\"secondary\"" - }, - { - "name": "size", - "annotation": "Literal['sm', 'lg'] | None", - "doc": "Size of the button. Can be \"sm\" or \"lg\".", - "default": "\"sm\"" - }, - { - "name": "visible", - "annotation": "bool", - "doc": "If False, component will be hidden.", - "default": "True" - }, - { - "name": "interactive", - "annotation": "bool", - "doc": "If False, the Button will be in a disabled state.", - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "elem_classes", - "annotation": "list[str] | str | None", - "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "scale", - "annotation": "int | None", - "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. 
Should be an integer.", - "default": "0" - }, - { - "name": "min_width", - "annotation": "int | None", - "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [ - { - "fn": null, - "name": "click", - "description": "This listener is triggered when the component (e.g. a button) is clicked. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.DuplicateButton" - } - ], - "string_shortcuts": [ - ["DuplicateButton", "duplicatebutton", "Uses default values"] - ], - "preprocessing": "passes the button value as a str into the function", - "postprocessing": "expects a str to be returned from a function, which is set as the label of the button", - "parent": "gradio", - "prev_obj": "Dropdown", - "next_obj": "File" - }, - "file": { - "class": null, - "name": "File", - "description": "Creates a file component that allows uploading generic file (when used as an input) and or displaying generic files (output).", - "tags": { - "preprocessing": "passes the uploaded file as a {tempfile._TemporaryFileWrapper} or {List[tempfile._TemporaryFileWrapper]} depending on `file_count` (or a {bytes}/{List{bytes}} depending on `type`)", - "postprocessing": "expects function to return a {str} path to a file, or {List[str]} consisting of paths to files.", - "examples-format": "a {str} path to a local file that populates the component.", - "demos": "zip_to_json, zip_files" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "value", - "annotation": "str | list[str] | Callable | None", - "doc": "Default file to display, given as str file path. If callable, the function will be called whenever the app loads to set the initial value of the component.", - "default": "None" - }, - { - "name": "file_count", - "annotation": "Literal['single', 'multiple', 'directory']", - "doc": "if single, allows user to upload one file. If \"multiple\", user uploads multiple files. If \"directory\", user uploads all files in selected directory. Return type will be list for each file in case of \"multiple\" or \"directory\".", - "default": "\"single\"" - }, - { - "name": "file_types", - "annotation": "list[str] | None", - "doc": "List of file extensions or types of files to be uploaded (e.g. ['image', '.json', '.mp4']). \"file\" allows any file to be uploaded, \"image\" allows only image files to be uploaded, \"audio\" allows only audio files to be uploaded, \"video\" allows only video files to be uploaded, \"text\" allows only text files to be uploaded.", - "default": "None" - }, - { - "name": "type", - "annotation": "Literal['file', 'binary']", - "doc": "Type of value to be returned by component. 
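For the `DuplicateButton` entry above, a minimal placement sketch using only the parameters listed there; the button renders locally but only performs a duplication when the app is hosted on Hugging Face Spaces.

```python
import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("# My demo")
    # Renders a small secondary-style button; duplication only happens on Spaces.
    gr.DuplicateButton(value="Duplicate Space", variant="secondary", size="sm")

if __name__ == "__main__":
    demo.launch()
```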
\"file\" returns a temporary file object with the same base name as the uploaded file, whose full path can be retrieved by file_obj.name, \"binary\" returns an bytes object.", - "default": "\"file\"" - }, - { - "name": "label", - "annotation": "str | None", - "doc": "component name in interface.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", - "default": "None" - }, - { - "name": "show_label", - "annotation": "bool | None", - "doc": "if True, will display label.", - "default": "None" - }, - { - "name": "container", - "annotation": "bool", - "doc": "If True, will place the component in a container - providing some extra padding around the border.", - "default": "True" - }, - { - "name": "scale", - "annotation": "int | None", - "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", - "default": "None" - }, - { - "name": "min_width", - "annotation": "int", - "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", - "default": "160" - }, - { - "name": "interactive", - "annotation": "bool | None", - "doc": "if True, will allow users to upload a file; if False, can only be used to display files. If not provided, this is inferred based on whether the component is used as an input or output.", - "default": "None" - }, - { - "name": "visible", - "annotation": "bool", - "doc": "If False, component will be hidden.", - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "elem_classes", - "annotation": "list[str] | str | None", - "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [ - { - "fn": null, - "name": "change", - "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. 
If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
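The `batch`/`max_batch_size` parameters described in this listener are easiest to see with a concrete function signature. A minimal sketch, assuming the queue is enabled as the docstring requires (`reverse_batch` is an illustrative name):

```python
import gradio as gr


def reverse_batch(words):
    # With batch=True the function receives a list of input values and must
    # return a tuple of lists, one list per output component.
    return ([w[::-1] for w in words],)


with gr.Blocks() as demo:
    word = gr.Textbox(label="Word")
    reversed_word = gr.Textbox(label="Reversed")
    word.submit(reverse_batch, word, reversed_word, batch=True, max_batch_size=8)

demo.queue()  # batching only applies to requests served from the queue

if __name__ == "__main__":
    demo.launch()
```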
Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.File" - }, - { - "fn": null, - "name": "clear", - "description": "This listener is triggered when the user clears the component (e.g. image or audio) using the X button for the component. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.File" - }, - { - "fn": null, - "name": "upload", - "description": "This listener is triggered when the user uploads a file into the component (e.g. when the user uploads a video into a video component). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. 
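The `upload` listener documented here, together with the default `type="file"` behaviour described earlier in this entry, is commonly used as below; `describe` and the component names are illustrative:

```python
import gradio as gr


def describe(file_obj):
    # With the default type="file", this is a tempfile wrapper; .name is its path.
    return f"Received {file_obj.name}"


with gr.Blocks() as demo:
    uploaded = gr.File(label="Upload a file")
    info = gr.Textbox(label="Info")
    # Fires when the user uploads a file into the component.
    uploaded.upload(describe, uploaded, info)

if __name__ == "__main__":
    demo.launch()
```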
If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.File" - }, - { - "fn": null, - "name": "select", - "description": "Event listener for when the user selects file from list. Uses event data gradio.SelectData to carry `value` referring to name of selected file, and `index` to refer to index. See EventData documentation on how to use this event data.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.File" - } - ], - "string_shortcuts": [ - ["File", "file", "Uses default values"], - ["Files", "files", "Uses file_count=\"multiple\""] - ], - "demos": [ - [ - "zip_to_json", - "from zipfile import ZipFile\n\nimport gradio as gr\n\n\ndef zip_to_json(file_obj):\n files = []\n with ZipFile(file_obj.name) as zfile:\n for zinfo in zfile.infolist():\n files.append(\n {\n \"name\": zinfo.filename,\n \"file_size\": zinfo.file_size,\n \"compressed_size\": zinfo.compress_size,\n }\n )\n return files\n\n\ndemo = gr.Interface(zip_to_json, \"file\", \"json\")\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ], - [ - "zip_files", - "import os\nfrom zipfile import ZipFile\n\nimport gradio as gr\n\n\ndef zip_files(files):\n with ZipFile(\"tmp.zip\", \"w\") as zipObj:\n for idx, file in enumerate(files):\n zipObj.write(file.name, file.name.split(\"/\")[-1])\n return \"tmp.zip\"\n\ndemo = gr.Interface(\n zip_files,\n gr.File(file_count=\"multiple\", file_types=[\"text\", \".json\", \".csv\"]),\n \"file\",\n examples=[[[os.path.join(os.path.dirname(__file__),\"files/titanic.csv\"), \n os.path.join(os.path.dirname(__file__),\"files/titanic.csv\"), \n os.path.join(os.path.dirname(__file__),\"files/titanic.csv\")]]], \n cache_examples=True\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ] - ], - "preprocessing": "passes the uploaded file as a tempfile._TemporaryFileWrapper or List[tempfile._TemporaryFileWrapper] depending on `file_count` (or a bytes/Listbytes depending on `type`)", - "postprocessing": "expects function to return a str path to a file, or List[str] consisting of paths to files.", - "examples-format": "a str path to a local file that populates the component.", - "parent": "gradio", - "prev_obj": "DuplicateButton", - "next_obj": "Gallery" - }, - "gallery": { - "class": null, - "name": "Gallery", - "description": "Used to display a list of images as a gallery that can be scrolled through.
", - "tags": { - "preprocessing": "this component does *not* accept input.", - "postprocessing": "expects a list of images in any format, {List[numpy.array | PIL.Image | str | pathlib.Path]}, or a {List} of (image, {str} caption) tuples and displays them.", - "demos": "fake_gan" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "value", - "annotation": "list[np.ndarray | _Image.Image | str | Path | tuple] | Callable | None", - "doc": "List of images to display in the gallery by default. If callable, the function will be called whenever the app loads to set the initial value of the component.", - "default": "None" - }, - { - "name": "label", - "annotation": "str | None", - "doc": "component name in interface.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", - "default": "None" - }, - { - "name": "show_label", - "annotation": "bool | None", - "doc": "if True, will display label.", - "default": "None" - }, - { - "name": "container", - "annotation": "bool", - "doc": "If True, will place the component in a container - providing some extra padding around the border.", - "default": "True" - }, - { - "name": "scale", - "annotation": "int | None", - "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", - "default": "None" - }, - { - "name": "min_width", - "annotation": "int", - "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", - "default": "160" - }, - { - "name": "visible", - "annotation": "bool", - "doc": "If False, component will be hidden.", - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "elem_classes", - "annotation": "list[str] | str | None", - "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "columns", - "annotation": "int | tuple | None", - "doc": "Represents the number of images that should be shown in one row, for each of the six standard screen sizes (<576px, <768px, <992px, <1200px, <1400px, >1400px). if fewer that 6 are given then the last will be used for all subsequent breakpoints", - "default": "2" - }, - { - "name": "rows", - "annotation": "int | tuple | None", - "doc": "Represents the number of rows in the image grid, for each of the six standard screen sizes (<576px, <768px, <992px, <1200px, <1400px, >1400px). 
if fewer that 6 are given then the last will be used for all subsequent breakpoints", - "default": "None" - }, - { - "name": "height", - "annotation": "str | None", - "doc": "Height of the gallery.", - "default": "None" - }, - { - "name": "preview", - "annotation": "bool | None", - "doc": "If True, will display the Gallery in preview mode, which shows all of the images as thumbnails and allows the user to click on them to view them in full size.", - "default": "None" - }, - { - "name": "object_fit", - "annotation": "Literal['contain', 'cover', 'fill', 'none', 'scale-down'] | None", - "doc": "CSS object-fit property for the thumbnail images in the gallery. Can be \"contain\", \"cover\", \"fill\", \"none\", or \"scale-down\".", - "default": "None" - }, - { - "name": "allow_preview", - "annotation": "bool", - "doc": "If True, images in the gallery will be enlarged when they are clicked. Default is True.", - "default": "True" - }, - { - "name": "show_share_button", - "annotation": "bool | None", - "doc": "If True, will show a share icon in the corner of the component that allows user to share outputs to Hugging Face Spaces Discussions. If False, icon does not appear. If set to None (default behavior), then the icon appears if this Gradio app is launched on Spaces, but not otherwise.", - "default": "None" - }, - { - "name": "show_download_button", - "annotation": "bool | None", - "doc": "If True, will show a download button in the corner of the selected image. If False, the icon does not appear. Default is True.", - "default": "True" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [ - { - "fn": null, - "name": "select", - "description": "Event listener for when the user selects image within Gallery. Uses event data gradio.SelectData to carry `value` referring to caption of selected image, and `index` to refer to index. See EventData documentation on how to use this event data.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
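As with the other `select` listeners in this file, `Gallery.select` delivers a `gradio.SelectData` carrying the caption and index of the clicked image. A short sketch (the image paths are hypothetical placeholders):

```python
import gradio as gr


def on_gallery_select(evt: gr.SelectData):
    # SelectData carries the caption of the clicked image and its index.
    return f"Selected item {evt.index}: {evt.value}"


with gr.Blocks() as demo:
    gallery = gr.Gallery(
        value=[("images/cat.png", "cat"), ("images/dog.png", "dog")],  # hypothetical files
        label="Pets",
    )
    caption = gr.Textbox(label="Selection")
    gallery.select(on_gallery_select, None, caption)

if __name__ == "__main__":
    demo.launch()
```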
Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Gallery" - } - ], - "string_shortcuts": [["Gallery", "gallery", "Uses default values"]], - "demos": [ - [ - "fake_gan", - "# This demo needs to be run from the repo folder.\n# python demo/fake_gan/run.py\nimport random\n\nimport gradio as gr\n\n\ndef fake_gan():\n images = [\n (random.choice(\n [\n \"https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n \"https://images.unsplash.com/photo-1554151228-14d9def656e4?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=386&q=80\",\n \"https://images.unsplash.com/photo-1542909168-82c3e7fdca5c?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxzZWFyY2h8MXx8aHVtYW4lMjBmYWNlfGVufDB8fDB8fA%3D%3D&w=1000&q=80\",\n \"https://images.unsplash.com/photo-1546456073-92b9f0a8d413?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n \"https://images.unsplash.com/photo-1601412436009-d964bd02edbc?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=464&q=80\",\n ]\n ), f\"label {i}\" if i != 0 else \"label\" * 50)\n for i in range(3)\n ]\n return images\n\n\nwith gr.Blocks() as demo:\n with gr.Column(variant=\"panel\"):\n with gr.Row(variant=\"compact\"):\n text = gr.Textbox(\n label=\"Enter your prompt\",\n show_label=False,\n max_lines=1,\n placeholder=\"Enter your prompt\",\n ).style(\n container=False,\n )\n btn = gr.Button(\"Generate image\").style(full_width=False)\n\n gallery = gr.Gallery(\n label=\"Generated images\", show_label=False, elem_id=\"gallery\"\n ).style(columns=[2], rows=[2], object_fit=\"contain\", height=\"auto\")\n\n btn.click(fake_gan, None, gallery)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ] - ], - "preprocessing": "this component does *not* accept input.", - "postprocessing": "expects a list of images in any format, List[numpy.array | PIL.Image | str | pathlib.Path], or a List of (image, str caption) tuples and displays them.", - "parent": "gradio", - "prev_obj": "File", - "next_obj": "HTML" - }, - "html": { - "class": null, - "name": "HTML", - "description": "Used to display arbitrary HTML output.
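A minimal sketch of the `HTML` component described above, updated here from a `Textbox.change` event; the `badge` helper is illustrative only.

```python
import gradio as gr


def badge(name):
    # Returns a raw HTML fragment; gr.HTML renders it directly.
    return f"<span style='padding: 4px 8px; background: #eee;'>{name}</span>"


with gr.Blocks() as demo:
    name = gr.Textbox(label="Name")
    preview = gr.HTML()
    name.change(badge, name, preview)

if __name__ == "__main__":
    demo.launch()
```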
", - "tags": { - "preprocessing": "this component does *not* accept input.", - "postprocessing": "expects a valid HTML {str}.", - "demos": "text_analysis", - "guides": "key-features" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "value", - "annotation": "str | Callable", - "doc": "Default value. If callable, the function will be called whenever the app loads to set the initial value of the component.", - "default": "\"\"" - }, - { - "name": "label", - "annotation": "str | None", - "doc": "component name in interface.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", - "default": "None" - }, - { - "name": "show_label", - "annotation": "bool | None", - "doc": "if True, will display label.", - "default": "None" - }, - { - "name": "visible", - "annotation": "bool", - "doc": "If False, component will be hidden.", - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "elem_classes", - "annotation": "list[str] | str | None", - "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [ - { - "fn": null, - "name": "change", - "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.HTML" - } - ], - "string_shortcuts": [["HTML", "html", "Uses default values"]], - "demos": [ - [ - "text_analysis", - "import gradio as gr\nimport os\nos.system('python -m spacy download en_core_web_sm')\nimport spacy\nfrom spacy import displacy\n\nnlp = spacy.load(\"en_core_web_sm\")\n\ndef text_analysis(text):\n doc = nlp(text)\n html = displacy.render(doc, style=\"dep\", page=True)\n html = (\n \"
\"\n + html\n + \"
\"\n )\n pos_count = {\n \"char_count\": len(text),\n \"token_count\": 0,\n }\n pos_tokens = []\n\n for token in doc:\n pos_tokens.extend([(token.text, token.pos_), (\" \", None)])\n\n return pos_tokens, pos_count, html\n\ndemo = gr.Interface(\n text_analysis,\n gr.Textbox(placeholder=\"Enter sentence here...\"),\n [\"highlight\", \"json\", \"html\"],\n examples=[\n [\"What a beautiful morning for a walk!\"],\n [\"It was the best of times, it was the worst of times.\"],\n ],\n)\n\ndemo.launch()\n" - ] - ], - "guides": [ - { - "name": "key-features", - "category": "getting-started", - "pretty_category": "Getting Started", - "guide_index": 2, - "absolute_index": 1, - "pretty_name": "Key Features", - "content": "# Key Features\n\nLet's go through some of the most popular features of Gradio! Here are Gradio's key features:\n\n1. [Adding example inputs](#example-inputs)\n2. [Passing custom error messages](#alerts)\n3. [Adding descriptive content](#descriptive-content)\n4. [Setting up flagging](#flagging)\n5. [Preprocessing and postprocessing](#preprocessing-and-postprocessing)\n6. [Styling demos](#styling)\n7. [Queuing users](#queuing)\n8. [Iterative outputs](#iterative-outputs)\n9. [Progress bars](#progress-bars)\n10. [Batch functions](#batch-functions)\n11. [Running on collaborative notebooks](#colab-notebooks)\n\n## Example Inputs\n\nYou can provide example data that a user can easily load into `Interface`. This can be helpful to demonstrate the types of inputs the model expects, as well as to provide a way to explore your dataset in conjunction with your model. To load example data, you can provide a **nested list** to the `examples=` keyword argument of the Interface constructor. Each sublist within the outer list represents a data sample, and each element within the sublist represents an input for each input component. The format of example data for each component is specified in the [Docs](https://gradio.app/docs#components).\n\n```python\nimport gradio as gr\n\ndef calculator(num1, operation, num2):\n if operation == \"add\":\n return num1 + num2\n elif operation == \"subtract\":\n return num1 - num2\n elif operation == \"multiply\":\n return num1 * num2\n elif operation == \"divide\":\n if num2 == 0:\n raise gr.Error(\"Cannot divide by zero!\")\n return num1 / num2\n\ndemo = gr.Interface(\n calculator,\n [\n \"number\", \n gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n \"number\"\n ],\n \"number\",\n examples=[\n [5, \"add\", 3],\n [4, \"divide\", 2],\n [-4, \"multiply\", 2.5],\n [0, \"subtract\", 1.2],\n ],\n title=\"Toy Calculator\",\n description=\"Here's a sample toy calculator. Allows you to calculate things like $2+2=4$\",\n)\ndemo.launch()\n\n```\n\n\nYou can load a large dataset into the examples to browse and interact with the dataset through Gradio. The examples will be automatically paginated (you can configure this through the `examples_per_page` argument of `Interface`).\n\nContinue learning about examples in the [More On Examples](https://gradio.app/more-on-examples) guide.\n\n## Alerts\n\nYou wish to pass custom error messages to the user. To do so, raise a `gr.Error(\"custom message\")` to display an error message. If you try to divide by zero in the calculator demo above, a popup modal will display the custom error message. Learn more about Error in the [docs](https://gradio.app/docs#error). 
\n\nYou can also issue `gr.Warning(\"message\")` and `gr.Info(\"message\")` by having them as standalone lines in your function, which will immediately display modals while continuing the execution of your function. Queueing needs to be enabled for this to work. \n\nNote below how the `gr.Error` has to be raised, while the `gr.Warning` and `gr.Info` are single lines.\n\n```python\ndef start_process(name):\n gr.Info(\"Starting process\")\n if name is None:\n gr.Warning(\"Name is empty\")\n ...\n if success == False:\n raise gr.Error(\"Process failed\")\n```\n \n## Descriptive Content\n\nIn the previous example, you may have noticed the `title=` and `description=` keyword arguments in the `Interface` constructor that helps users understand your app.\n\nThere are three arguments in the `Interface` constructor to specify where this content should go:\n\n* `title`: which accepts text and can display it at the very top of interface, and also becomes the page title.\n* `description`: which accepts text, markdown or HTML and places it right under the title.\n* `article`: which also accepts text, markdown or HTML and places it below the interface.\n\n![annotated](https://github.com/gradio-app/gradio/blob/main/guides/assets/annotated.png?raw=true)\n\nIf you're using the `Blocks` API instead, you can insert text, markdown, or HTML anywhere using the `gr.Markdown(...)` or `gr.HTML(...)` components, with descriptive content inside the `Component` constructor.\n\nAnother useful keyword argument is `label=`, which is present in every `Component`. This modifies the label text at the top of each `Component`. You can also add the `info=` keyword argument to form elements like `Textbox` or `Radio` to provide further information on their usage.\n\n```python\ngr.Number(label='Age', info='In years, must be greater than 0')\n```\n\n## Flagging\n\nBy default, an `Interface` will have \"Flag\" button. When a user testing your `Interface` sees input with interesting output, such as erroneous or unexpected model behaviour, they can flag the input for you to review. Within the directory provided by the `flagging_dir=` argument to the `Interface` constructor, a CSV file will log the flagged inputs. If the interface involves file data, such as for Image and Audio components, folders will be created to store those flagged data as well.\n\nFor example, with the calculator interface shown above, we would have the flagged data stored in the flagged directory shown below:\n\n```directory\n+-- calculator.py\n+-- flagged/\n| +-- logs.csv\n```\n\n*flagged/logs.csv*\n\n```csv\nnum1,operation,num2,Output\n5,add,7,12\n6,subtract,1.5,4.5\n```\n\nWith the sepia interface shown earlier, we would have the flagged data stored in the flagged directory shown below:\n\n```directory\n+-- sepia.py\n+-- flagged/\n| +-- logs.csv\n| +-- im/\n| | +-- 0.png\n| | +-- 1.png\n| +-- Output/\n| | +-- 0.png\n| | +-- 1.png\n```\n\n*flagged/logs.csv*\n\n```csv\nim,Output\nim/0.png,Output/0.png\nim/1.png,Output/1.png\n```\n\nIf you wish for the user to provide a reason for flagging, you can pass a list of strings to the `flagging_options` argument of Interface. Users will have to select one of the strings when flagging, which will be saved as an additional column to the CSV.\n\n## Preprocessing and Postprocessing\n\n![](https://github.com/gradio-app/gradio/blob/main/js/_website/src/assets/img/dataflow.svg?raw=true)\n\nAs you've seen, Gradio includes components that can handle a variety of different data types, such as images, audio, and video. 
Most components can be used both as inputs or outputs.\n\nWhen a component is used as an input, Gradio automatically handles the *preprocessing* needed to convert the data from a type sent by the user's browser (such as a base64 representation of a webcam snapshot) to a form that can be accepted by your function (such as a `numpy` array).\n\nSimilarly, when a component is used as an output, Gradio automatically handles the *postprocessing* needed to convert the data from what is returned by your function (such as a list of image paths) to a form that can be displayed in the user's browser (such as a `Gallery` of images in base64 format).\n\nYou can control the *preprocessing* using the parameters when constructing the image component. For example, here if you instantiate the `Image` component with the following parameters, it will convert the image to the `PIL` type and reshape it to be `(100, 100)` no matter the original size that it was submitted as:\n\n```py\nimg = gr.Image(shape=(100, 100), type=\"pil\")\n```\n\nIn contrast, here we keep the original size of the image, but invert the colors before converting it to a numpy array:\n\n```py\nimg = gr.Image(invert_colors=True, type=\"numpy\")\n```\n\nPostprocessing is a lot easier! Gradio automatically recognizes the format of the returned data (e.g. is the `Image` a `numpy` array or a `str` filepath?) and postprocesses it into a format that can be displayed by the browser.\n\nTake a look at the [Docs](https://gradio.app/docs) to see all the preprocessing-related parameters for each Component.\n\n## Styling\n\nGradio themes are the easiest way to customize the look and feel of your app. You can choose from a variety of themes, or create your own. To do so, pass the `theme=` kwarg to the `Interface` constructor. For example:\n\n```python\ndemo = gr.Interface(..., theme=gr.themes.Monochrome())\n```\n\nGradio comes with a set of prebuilt themes which you can load from `gr.themes.*`. You can extend these themes or create your own themes from scratch - see the [Theming guide](https://gradio.app/guides/theming-guide) for more details.\n\nFor additional styling ability, you can pass any CSS to your app using the `css=` kwarg.\nThe base class for the Gradio app is `gradio-container`, so here's an example that changes the background color of the Gradio app:\n\n```python\nwith gr.Interface(css=\".gradio-container {background-color: red}\") as demo:\n ...\n```\n\nSome components can be additionally styled through the `style()` method. For example:\n\n```python\nimg = gr.Image(\"lion.jpg\").style(height='24', rounded=False)\n```\n\nTake a look at the [Docs](https://gradio.app/docs) to see all the styling options for each Component.\n\n## Queuing\n\nIf your app expects heavy traffic, use the `queue()` method to control processing rate. This will queue up calls so only a certain number of requests are processed at a single time. 
Queueing uses websockets, which also prevent network timeouts, so you should use queueing if the inference time of your function is long (> 1min).\n\nWith `Interface`:\n\n```python\ndemo = gr.Interface(...).queue()\ndemo.launch()\n```\n\nWith `Blocks`:\n\n```python\nwith gr.Blocks() as demo:\n #...\ndemo.queue()\ndemo.launch()\n```\n\nYou can control the number of requests processed at a single time as such:\n\n```python\ndemo.queue(concurrency_count=3)\n```\n\nSee the [Docs on queueing](/docs/#queue) on configuring other queuing parameters.\n\nTo specify only certain functions for queueing in Blocks:\n\n```python\nwith gr.Blocks() as demo2:\n num1 = gr.Number()\n num2 = gr.Number()\n output = gr.Number()\n gr.Button(\"Add\").click(\n lambda a, b: a + b, [num1, num2], output)\n gr.Button(\"Multiply\").click(\n lambda a, b: a * b, [num1, num2], output, queue=True)\ndemo2.launch()\n```\n\n## Iterative Outputs\n\nIn some cases, you may want to stream a sequence of outputs rather than show a single output at once. For example, you might have an image generation model and you want to show the image that is generated at each step, leading up to the final image. Or you might have a chatbot which streams its response one word at a time instead of returning it all at once.\n\nIn such cases, you can supply a **generator** function into Gradio instead of a regular function. Creating generators in Python is very simple: instead of a single `return` value, a function should `yield` a series of values instead. Usually the `yield` statement is put in some kind of loop. Here's an example of an generator that simply counts up to a given number:\n\n```python\ndef my_generator(x):\n for i in range(x):\n yield i\n```\n\nYou supply a generator into Gradio the same way as you would a regular function. For example, here's a a (fake) image generation model that generates noise for several steps before outputting an image:\n\n```python\nimport gradio as gr\nimport numpy as np\nimport time\n\n# define core fn, which returns a generator {steps} times before returning the image\ndef fake_diffusion(steps):\n for _ in range(steps):\n time.sleep(1)\n image = np.random.random((600, 600, 3))\n yield image\n image = \"https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg\"\n yield image\n\n\ndemo = gr.Interface(fake_diffusion, inputs=gr.Slider(1, 10, 3), outputs=\"image\")\n\n# define queue - required for generators\ndemo.queue()\n\ndemo.launch()\n\n```\n\n\nNote that we've added a `time.sleep(1)` in the iterator to create an artificial pause between steps so that you are able to observe the steps of the iterator (in a real image generation model, this probably wouldn't be necessary).\n\nSupplying a generator into Gradio **requires** you to enable queuing in the underlying Interface or Blocks (see the queuing section above).\n\n## Progress Bars\n\nGradio supports the ability to create a custom Progress Bars so that you have customizability and control over the progress update that you show to the user. In order to enable this, simply add an argument to your method that has a default value of a `gr.Progress` instance. Then you can update the progress levels by calling this instance directly with a float between 0 and 1, or using the `tqdm()` method of the `Progress` instance to track progress over an iterable, as shown below. 
Queueing must be enabled for progress updates.\n\n```python\nimport gradio as gr\nimport time\n\ndef slowly_reverse(word, progress=gr.Progress()):\n progress(0, desc=\"Starting\")\n time.sleep(1)\n progress(0.05)\n new_string = \"\"\n for letter in progress.tqdm(word, desc=\"Reversing\"):\n time.sleep(0.25)\n new_string = letter + new_string\n return new_string\n\ndemo = gr.Interface(slowly_reverse, gr.Text(), gr.Text())\n\nif __name__ == \"__main__\":\n demo.queue(concurrency_count=10).launch()\n\n```\n\n\nIf you use the `tqdm` library, you can even report progress updates automatically from any `tqdm.tqdm` that already exists within your function by setting the default argument as `gr.Progress(track_tqdm=True)`!\n\n## Batch Functions\n\nGradio supports the ability to pass *batch* functions. Batch functions are just\nfunctions which take in a list of inputs and return a list of predictions.\n\nFor example, here is a batched function that takes in two lists of inputs (a list of\nwords and a list of ints), and returns a list of trimmed words as output:\n\n```py\nimport time\n\ndef trim_words(words, lens):\n trimmed_words = []\n time.sleep(5)\n for w, l in zip(words, lens):\n trimmed_words.append(w[:int(l)]) \n return [trimmed_words]\n```\n\nThe advantage of using batched functions is that if you enable queuing, the Gradio\nserver can automatically *batch* incoming requests and process them in parallel,\npotentially speeding up your demo. Here's what the Gradio code looks like (notice\nthe `batch=True` and `max_batch_size=16` -- both of these parameters can be passed\ninto event triggers or into the `Interface` class)\n\nWith `Interface`:\n\n```python\ndemo = gr.Interface(trim_words, [\"textbox\", \"number\"], [\"output\"], \n batch=True, max_batch_size=16)\ndemo.queue()\ndemo.launch()\n```\n\nWith `Blocks`:\n\n```py\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n with gr.Row():\n word = gr.Textbox(label=\"word\")\n leng = gr.Number(label=\"leng\")\n output = gr.Textbox(label=\"Output\")\n with gr.Row():\n run = gr.Button()\n\n event = run.click(trim_words, [word, leng], output, batch=True, max_batch_size=16)\n\ndemo.queue()\ndemo.launch()\n```\n\nIn the example above, 16 requests could be processed in parallel (for a total inference\ntime of 5 seconds), instead of each request being processed separately (for a total\ninference time of 80 seconds). Many Hugging Face `transformers` and `diffusers` models\nwork very naturally with Gradio's batch mode: here's [an example demo using diffusers to\ngenerate images in batches](https://github.com/gradio-app/gradio/blob/main/demo/diffusers_with_batching/run.py)\n\nNote: using batch functions with Gradio **requires** you to enable queuing in the underlying Interface or Blocks (see the queuing section above).\n\n\n## Colab Notebooks\n\n\nGradio is able to run anywhere you run Python, including local jupyter notebooks as well as collaborative notebooks, such as [Google Colab](https://colab.research.google.com/). In the case of local jupyter notebooks and Google Colab notbooks, Gradio runs on a local server which you can interact with in your browser. (Note: for Google Colab, this is accomplished by [service worker tunneling](https://github.com/tensorflow/tensorboard/blob/master/docs/design/colab_integration.md), which requires cookies to be enabled in your browser.) 
For other remote notebooks, Gradio will also run on a server, but you will need to use [SSH tunneling](https://coderwall.com/p/ohk6cg/remote-access-to-ipython-notebooks-via-ssh) to view the app in your local browser. Often a simpler option is to use Gradio's built-in public links, [discussed in the next Guide](https://gradio.app/guides/sharing-your-app/#sharing-demos). ", - "html": "

\n", - "tags": [], - "spaces": [], - "url": "/guides/key-features/", - "contributor": null - } - ], - "preprocessing": "this component does *not* accept input.", - "postprocessing": "expects a valid HTML str.", - "parent": "gradio", - "prev_obj": "Gallery", - "next_obj": "HighlightedText" - }, - "highlightedtext": { - "class": null, - "name": "HighlightedText", - "description": "Displays text that contains spans that are highlighted by category or numerical value.
", - "tags": { - "preprocessing": "this component does *not* accept input.", - "postprocessing": "expects a {List[Tuple[str, float | str]]]} consisting of spans of text and their associated labels, or a {Dict} with two keys: (1) \"text\" whose value is the complete text, and (2) \"entities\", which is a list of dictionaries, each of which have the keys: \"entity\" (consisting of the entity label, can alternatively be called \"entity_group\"), \"start\" (the character index where the label starts), and \"end\" (the character index where the label ends). Entities should not overlap.", - "demos": "diff_texts, text_analysis", - "guides": "named-entity-recognition" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "value", - "annotation": "list[tuple[str, str | float | None]] | dict | Callable | None", - "doc": "Default value to show. If callable, the function will be called whenever the app loads to set the initial value of the component.", - "default": "None" - }, - { - "name": "color_map", - "annotation": "dict[str, str] | None", - "doc": null, - "default": "None" - }, - { - "name": "show_legend", - "annotation": "bool", - "doc": "whether to show span categories in a separate legend or inline.", - "default": "False" - }, - { - "name": "combine_adjacent", - "annotation": "bool", - "doc": "If True, will merge the labels of adjacent tokens belonging to the same category.", - "default": "False" - }, - { - "name": "adjacent_separator", - "annotation": "str", - "doc": "Specifies the separator to be used between tokens if combine_adjacent is True.", - "default": "\"\"" - }, - { - "name": "label", - "annotation": "str | None", - "doc": "component name in interface.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", - "default": "None" - }, - { - "name": "show_label", - "annotation": "bool | None", - "doc": "if True, will display label.", - "default": "None" - }, - { - "name": "container", - "annotation": "bool", - "doc": "If True, will place the component in a container - providing some extra padding around the border.", - "default": "True" - }, - { - "name": "scale", - "annotation": "int | None", - "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", - "default": "None" - }, - { - "name": "min_width", - "annotation": "int", - "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", - "default": "160" - }, - { - "name": "visible", - "annotation": "bool", - "doc": "If False, component will be hidden.", - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "elem_classes", - "annotation": "list[str] | str | None", - "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. 
Can be used for targeting CSS styles.", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [ - { - "fn": null, - "name": "change", - "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.HighlightedText" - }, - { - "fn": null, - "name": "select", - "description": "Event listener for when the user selects Highlighted text span. Uses event data gradio.SelectData to carry `value` referring to selected [text, label] tuple, and `index` to refer to span index. See EventData documentation on how to use this event data.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. 
If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.HighlightedText" - } - ], - "string_shortcuts": [ - ["HighlightedText", "highlightedtext", "Uses default values"] - ], - "demos": [ - [ - "diff_texts", - "from difflib import Differ\n\nimport gradio as gr\n\n\ndef diff_texts(text1, text2):\n d = Differ()\n return [\n (token[2:], token[0] if token[0] != \" \" else None)\n for token in d.compare(text1, text2)\n ]\n\n\ndemo = gr.Interface(\n diff_texts,\n [\n gr.Textbox(\n label=\"Text 1\",\n info=\"Initial text\",\n lines=3,\n value=\"The quick brown fox jumped over the lazy dogs.\",\n ),\n gr.Textbox(\n label=\"Text 2\",\n info=\"Text to compare\",\n lines=3,\n value=\"The fast brown fox jumps over lazy dogs.\",\n ),\n ],\n gr.HighlightedText(\n label=\"Diff\",\n combine_adjacent=True,\n show_legend=True,\n ).style(color_map={\"+\": \"red\", \"-\": \"green\"}),\n theme=gr.themes.Base()\n)\nif __name__ == \"__main__\":\n demo.launch()\n" - ], - [ - "text_analysis", - "import gradio as gr\nimport os\nos.system('python -m spacy download en_core_web_sm')\nimport spacy\nfrom spacy import displacy\n\nnlp = spacy.load(\"en_core_web_sm\")\n\ndef text_analysis(text):\n doc = nlp(text)\n html = displacy.render(doc, style=\"dep\", page=True)\n html = (\n \"
\"\n + html\n + \"
\"\n )\n pos_count = {\n \"char_count\": len(text),\n \"token_count\": 0,\n }\n pos_tokens = []\n\n for token in doc:\n pos_tokens.extend([(token.text, token.pos_), (\" \", None)])\n\n return pos_tokens, pos_count, html\n\ndemo = gr.Interface(\n text_analysis,\n gr.Textbox(placeholder=\"Enter sentence here...\"),\n [\"highlight\", \"json\", \"html\"],\n examples=[\n [\"What a beautiful morning for a walk!\"],\n [\"It was the best of times, it was the worst of times.\"],\n ],\n)\n\ndemo.launch()\n" - ] - ], - "guides": [ - { - "name": "named-entity-recognition", - "category": "other-tutorials", - "pretty_category": "Other Tutorials", - "guide_index": null, - "absolute_index": 39, - "pretty_name": "Named Entity Recognition", - "content": "# Named-Entity Recognition \n\n\n\n\n## Introduction\n\nNamed-entity recognition (NER), also known as token classification or text tagging, is the task of taking a sentence and classifying every word (or \"token\") into different categories, such as names of people or names of locations, or different parts of speech. \n\nFor example, given the sentence:\n\n> Does Chicago have any Pakistani restaurants?\n\nA named-entity recognition algorithm may identify:\n\n* \"Chicago\" as a **location**\n* \"Pakistani\" as an **ethnicity** \n\n\nand so on. \n\nUsing `gradio` (specifically the `HighlightedText` component), you can easily build a web demo of your NER model and share that with the rest of your team.\n\nHere is an example of a demo that you'll be able to build:\n\n\n\nThis tutorial will show how to take a pretrained NER model and deploy it with a Gradio interface. We will show two different ways to use the `HighlightedText` component -- depending on your NER model, either of these two ways may be easier to learn! \n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). You will also need a pretrained named-entity recognition model. You can use your own, while in this tutorial, we will use one from the `transformers` library.\n\n### Approach 1: List of Entity Dictionaries\n\nMany named-entity recognition models output a list of dictionaries. Each dictionary consists of an *entity*, a \"start\" index, and an \"end\" index. This is, for example, how NER models in the `transformers` library operate:\n\n```py\nfrom transformers import pipeline \nner_pipeline = pipeline(\"ner\")\nner_pipeline(\"Does Chicago have any Pakistani restaurants\")\n```\n\nOutput:\n\n```bash\n[{'entity': 'I-LOC',\n 'score': 0.9988978,\n 'index': 2,\n 'word': 'Chicago',\n 'start': 5,\n 'end': 12},\n {'entity': 'I-MISC',\n 'score': 0.9958592,\n 'index': 5,\n 'word': 'Pakistani',\n 'start': 22,\n 'end': 31}]\n```\n\nIf you have such a model, it is very easy to hook it up to Gradio's `HighlightedText` component. 
All you need to do is pass in this **list of entities**, along with the **original text** to the model, together as dictionary, with the keys being `\"entities\"` and `\"text\"` respectively.\n\nHere is a complete example:\n\n```python\nfrom transformers import pipeline\n\nimport gradio as gr\n\nner_pipeline = pipeline(\"ner\")\n\nexamples = [\n \"Does Chicago have any stores and does Joe live here?\",\n]\n\ndef ner(text):\n output = ner_pipeline(text)\n return {\"text\": text, \"entities\": output} \n\ndemo = gr.Interface(ner,\n gr.Textbox(placeholder=\"Enter sentence here...\"), \n gr.HighlightedText(),\n examples=examples)\n\ndemo.launch()\n\n```\n\n\n### Approach 2: List of Tuples\n\nAn alternative way to pass data into the `HighlightedText` component is a list of tuples. The first element of each tuple should be the word or words that are being classified into a particular entity. The second element should be the entity label (or `None` if they should be unlabeled). The `HighlightedText` component automatically strings together the words and labels to display the entities.\n\nIn some cases, this can be easier than the first approach. Here is a demo showing this approach using Spacy's parts-of-speech tagger:\n\n```python\nimport gradio as gr\nimport os\nos.system('python -m spacy download en_core_web_sm')\nimport spacy\nfrom spacy import displacy\n\nnlp = spacy.load(\"en_core_web_sm\")\n\ndef text_analysis(text):\n doc = nlp(text)\n html = displacy.render(doc, style=\"dep\", page=True)\n html = (\n \"
\"\n + html\n + \"
\"\n )\n pos_count = {\n \"char_count\": len(text),\n \"token_count\": 0,\n }\n pos_tokens = []\n\n for token in doc:\n pos_tokens.extend([(token.text, token.pos_), (\" \", None)])\n\n return pos_tokens, pos_count, html\n\ndemo = gr.Interface(\n text_analysis,\n gr.Textbox(placeholder=\"Enter sentence here...\"),\n [\"highlight\", \"json\", \"html\"],\n examples=[\n [\"What a beautiful morning for a walk!\"],\n [\"It was the best of times, it was the worst of times.\"],\n ],\n)\n\ndemo.launch()\n\n```\n\n\n\n--------------------------------------------\n\n\nAnd you're done! That's all you need to know to build a web-based GUI for your NER model. \n\nFun tip: you can share your NER demo instantly with others simply by setting `share=True` in `launch()`. \n\n\n", - "html": "

Named-Entity Recognition

\n\n

Introduction

\n\n

Named-entity recognition (NER), also known as token classification or text tagging, is the task of taking a sentence and classifying every word (or \"token\") into different categories, such as names of people or names of locations, or different parts of speech.

\n\n

For example, given the sentence:

\n\n
\n

Does Chicago have any Pakistani restaurants?

\n
\n\n

A named-entity recognition algorithm may identify:

\n\n
    \n
  • \"Chicago\" as a location
  • \n
  • \"Pakistani\" as an ethnicity
  • \n
\n\n

and so on.

\n\n

Using gradio (specifically the HighlightedText component), you can easily build a web demo of your NER model and share that with the rest of your team.

\n\n

Here is an example of a demo that you'll be able to build:

\n\n

\n\n

This tutorial will show how to take a pretrained NER model and deploy it with a Gradio interface. We will show two different ways to use the HighlightedText component -- depending on your NER model, either of these two ways may be easier to learn!

\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed. You will also need a pretrained named-entity recognition model. You can use your own, while in this tutorial, we will use one from the transformers library.

\n\n

Approach 1: List of Entity Dictionaries

\n\n

Many named-entity recognition models output a list of dictionaries. Each dictionary consists of an entity, a \"start\" index, and an \"end\" index. This is, for example, how NER models in the transformers library operate:

\n\n
from transformers import pipeline \nner_pipeline = pipeline(\"ner\")\nner_pipeline(\"Does Chicago have any Pakistani restaurants\")\n
\n\n

Output:

\n\n
[{'entity': 'I-LOC',\n  'score': 0.9988978,\n  'index': 2,\n  'word': 'Chicago',\n  'start': 5,\n  'end': 12},\n {'entity': 'I-MISC',\n  'score': 0.9958592,\n  'index': 5,\n  'word': 'Pakistani',\n  'start': 22,\n  'end': 31}]\n
\n\n

If you have such a model, it is very easy to hook it up to Gradio's HighlightedText component. All you need to do is pass this list of entities, along with the original text, to the model as a single dictionary, with the keys being \"entities\" and \"text\" respectively.

\n\n

Here is a complete example:

\n\n
from transformers import pipeline\n\nimport gradio as gr\n\nner_pipeline = pipeline(\"ner\")\n\nexamples = [\n    \"Does Chicago have any stores and does Joe live here?\",\n]\n\ndef ner(text):\n    output = ner_pipeline(text)\n    return {\"text\": text, \"entities\": output}    \n\ndemo = gr.Interface(ner,\n             gr.Textbox(placeholder=\"Enter sentence here...\"), \n             gr.HighlightedText(),\n             examples=examples)\n\ndemo.launch()\n\n
\n\n

\n\n

Approach 2: List of Tuples

\n\n

An alternative way to pass data into the HighlightedText component is a list of tuples. The first element of each tuple should be the word or words that are being classified into a particular entity. The second element should be the entity label (or None if they should be unlabeled). The HighlightedText component automatically strings together the words and labels to display the entities.

\n\n

In some cases, this can be easier than the first approach. Here is a demo showing this approach using Spacy's parts-of-speech tagger:

\n\n
import gradio as gr\nimport os\nos.system('python -m spacy download en_core_web_sm')\nimport spacy\nfrom spacy import displacy\n\nnlp = spacy.load(\"en_core_web_sm\")\n\ndef text_analysis(text):\n    doc = nlp(text)\n    html = displacy.render(doc, style=\"dep\", page=True)\n    html = (\n        \"
\"\n + html\n + \"
\"\n )\n pos_count = {\n \"char_count\": len(text),\n \"token_count\": 0,\n }\n pos_tokens = []\n\n for token in doc:\n pos_tokens.extend([(token.text, token.pos_), (\" \", None)])\n\n return pos_tokens, pos_count, html\n\ndemo = gr.Interface(\n text_analysis,\n gr.Textbox(placeholder=\"Enter sentence here...\"),\n [\"highlight\", \"json\", \"html\"],\n examples=[\n [\"What a beautiful morning for a walk!\"],\n [\"It was the best of times, it was the worst of times.\"],\n ],\n)\n\ndemo.launch()\n\n
\n\n

\n\n
\n\n

And you're done! That's all you need to know to build a web-based GUI for your NER model.

\n\n

Fun tip: you can share your NER demo instantly with others simply by setting share=True in launch().

\n", - "tags": ["NER", "TEXT", "HIGHLIGHT"], - "spaces": [ - "https://huggingface.co/spaces/rajistics/biobert_ner_demo", - "https://huggingface.co/spaces/abidlabs/ner", - "https://huggingface.co/spaces/rajistics/Financial_Analyst_AI" - ], - "url": "/guides/named-entity-recognition/", - "contributor": null - } - ], - "preprocessing": "this component does *not* accept input.", - "postprocessing": "expects a List[Tuple[str, float | str]]] consisting of spans of text and their associated labels, or a Dict with two keys: (1) \"text\" whose value is the complete text, and (2) \"entities\", which is a list of dictionaries, each of which have the keys: \"entity\" (consisting of the entity label, can alternatively be called \"entity_group\"), \"start\" (the character index where the label starts), and \"end\" (the character index where the label ends). Entities should not overlap.", - "parent": "gradio", - "prev_obj": "HTML", - "next_obj": "Image" - }, - "image": { - "class": null, - "name": "Image", - "description": "Creates an image component that can be used to upload/draw images (as an input) or display images (as an output).", - "tags": { - "preprocessing": "passes the uploaded image as a {numpy.array}, {PIL.Image} or {str} filepath depending on `type` -- unless `tool` is `sketch` AND source is one of `upload` or `webcam`. In these cases, a {dict} with keys `image` and `mask` is passed, and the format of the corresponding values depends on `type`.", - "postprocessing": "expects a {numpy.array}, {PIL.Image} or {str} or {pathlib.Path} filepath to an image and displays the image.", - "examples-format": "a {str} filepath to a local file that contains the image.", - "demos": "image_mod, image_mod_default_image", - "guides": "image-classification-in-pytorch, image-classification-in-tensorflow, image-classification-with-vision-transformers, building-a-pictionary_app, create-your-own-friends-with-a-gan" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "value", - "annotation": "str | _Image.Image | np.ndarray | None", - "doc": "A PIL Image, numpy array, path or URL for the default value that Image component is going to take. If callable, the function will be called whenever the app loads to set the initial value of the component.", - "default": "None" - }, - { - "name": "shape", - "annotation": "tuple[int, int] | None", - "doc": "(width, height) shape to crop and resize image when passed to function. If None, matches input image size. Pass None for either width or height to only crop and resize the other.", - "default": "None" - }, - { - "name": "height", - "annotation": "int | None", - "doc": "Height of the displayed image in pixels.", - "default": "None" - }, - { - "name": "width", - "annotation": "int | None", - "doc": "Width of the displayed image in pixels.", - "default": "None" - }, - { - "name": "image_mode", - "annotation": "Literal['1', 'L', 'P', 'RGB', 'RGBA', 'CMYK', 'YCbCr', 'LAB', 'HSV', 'I', 'F']", - "doc": "\"RGB\" if color, or \"L\" if black and white. See https://pillow.readthedocs.io/en/stable/handbook/concepts.html for other supported image modes and their meaning.", - "default": "\"RGB\"" - }, - { - "name": "invert_colors", - "annotation": "bool", - "doc": "whether to invert the image as a preprocessing step.", - "default": "False" - }, - { - "name": "source", - "annotation": "Literal['upload', 'webcam', 'canvas']", - "doc": "Source of image. 
\"upload\" creates a box where user can drop an image file, \"webcam\" allows user to take snapshot from their webcam, \"canvas\" defaults to a white image that can be edited and drawn upon with tools.", - "default": "\"upload\"" - }, - { - "name": "tool", - "annotation": "Literal['editor', 'select', 'sketch', 'color-sketch'] | None", - "doc": "Tools used for editing. \"editor\" allows a full screen editor (and is the default if source is \"upload\" or \"webcam\"), \"select\" provides a cropping and zoom tool, \"sketch\" allows you to create a binary sketch (and is the default if source=\"canvas\"), and \"color-sketch\" allows you to created a sketch in different colors. \"color-sketch\" can be used with source=\"upload\" or \"webcam\" to allow sketching on an image. \"sketch\" can also be used with \"upload\" or \"webcam\" to create a mask over an image and in that case both the image and mask are passed into the function as a dictionary with keys \"image\" and \"mask\" respectively.", - "default": "None" - }, - { - "name": "type", - "annotation": "Literal['numpy', 'pil', 'filepath']", - "doc": "The format the image is converted to before being passed into the prediction function. \"numpy\" converts the image to a numpy array with shape (height, width, 3) and values from 0 to 255, \"pil\" converts the image to a PIL image object, \"filepath\" passes a str path to a temporary file containing the image.", - "default": "\"numpy\"" - }, - { - "name": "label", - "annotation": "str | None", - "doc": "component name in interface.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", - "default": "None" - }, - { - "name": "show_label", - "annotation": "bool | None", - "doc": "if True, will display label.", - "default": "None" - }, - { - "name": "show_download_button", - "annotation": "bool", - "doc": "If True, will display button to download image.", - "default": "True" - }, - { - "name": "container", - "annotation": "bool", - "doc": "If True, will place the component in a container - providing some extra padding around the border.", - "default": "True" - }, - { - "name": "scale", - "annotation": "int | None", - "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", - "default": "None" - }, - { - "name": "min_width", - "annotation": "int", - "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", - "default": "160" - }, - { - "name": "interactive", - "annotation": "bool | None", - "doc": "if True, will allow users to upload and edit an image; if False, can only be used to display images. If not provided, this is inferred based on whether the component is used as an input or output.", - "default": "None" - }, - { - "name": "visible", - "annotation": "bool", - "doc": "If False, component will be hidden.", - "default": "True" - }, - { - "name": "streaming", - "annotation": "bool", - "doc": "If True when used in a `live` interface, will automatically stream webcam feed. 
Only valid is source is 'webcam'.", - "default": "False" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "elem_classes", - "annotation": "list[str] | str | None", - "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "mirror_webcam", - "annotation": "bool", - "doc": "If True webcam will be mirrored. Default is True.", - "default": "True" - }, - { - "name": "brush_radius", - "annotation": "float | None", - "doc": "Size of the brush for Sketch. Default is None which chooses a sensible default", - "default": "None" - }, - { - "name": "brush_color", - "annotation": "str", - "doc": "Color of the brush for Sketch as hex string. Default is \"#000000\".", - "default": "\"#000000\"" - }, - { - "name": "mask_opacity", - "annotation": "float", - "doc": "Opacity of mask drawn on image, as a value between 0 and 1.", - "default": "0.7" - }, - { - "name": "show_share_button", - "annotation": "bool | None", - "doc": "If True, will show a share icon in the corner of the component that allows user to share outputs to Hugging Face Spaces Discussions. If False, icon does not appear. If set to None (default behavior), then the icon appears if this Gradio app is launched on Spaces, but not otherwise.", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [ - { - "fn": null, - "name": "change", - "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Image" - }, - { - "fn": null, - "name": "edit", - "description": "This listener is triggered when the user edits the component (e.g. image) using the built-in editor. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. 
If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Image" - }, - { - "fn": null, - "name": "clear", - "description": "This listener is triggered when the user clears the component (e.g. image or audio) using the X button for the component. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Image" - }, - { - "fn": null, - "name": "stream", - "description": "This listener is triggered when the user streams the component (e.g. a live webcam component). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. 
If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Image" - }, - { - "fn": null, - "name": "upload", - "description": "This listener is triggered when the user uploads a file into the component (e.g. when the user uploads a video into a video component). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Image" - }, - { - "fn": null, - "name": "select", - "description": "Event listener for when the user clicks on a pixel within the image. Uses event data gradio.SelectData to carry `index` to refer to the [x, y] coordinates of the clicked pixel. See EventData documentation on how to use this event data.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." 
- }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Image" - } - ], - "string_shortcuts": [ - ["Image", "image", "Uses default values"], - ["Webcam", "webcam", "Uses source=\"webcam\", interactive=True"], - [ - "Sketchpad", - "sketchpad", - "Uses image_mode=\"L\", source=\"canvas\", shape=(28, 28), invert_colors=True, interactive=True" - ], - [ - "Paint", - "paint", - "Uses source=\"canvas\", tool=\"color-sketch\", interactive=True" - ], - [ - "ImageMask", - "imagemask", - "Uses source=\"upload\", tool=\"sketch\", interactive=True" - ], - [ - "ImagePaint", - "imagepaint", - "Uses source=\"upload\", tool=\"color-sketch\", interactive=True" - ], - ["Pil", "pil", "Uses type=\"pil\""] - ], - "demos": [ - [ - "image_mod", - "import gradio as gr\nimport os\n\n\ndef image_mod(image):\n return image.rotate(45)\n\n\ndemo = gr.Interface(\n image_mod,\n gr.Image(type=\"pil\"),\n \"image\",\n flagging_options=[\"blurry\", \"incorrect\", \"other\"],\n examples=[\n os.path.join(os.path.dirname(__file__), \"images/cheetah1.jpg\"),\n os.path.join(os.path.dirname(__file__), \"images/lion.jpg\"),\n os.path.join(os.path.dirname(__file__), \"images/logo.png\"),\n os.path.join(os.path.dirname(__file__), \"images/tower.jpg\"),\n ],\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ], - [ - "image_mod_default_image", - "import gradio as gr\nimport os\n\n\ndef image_mod(image):\n return image.rotate(45)\n\n\ncheetah = os.path.join(os.path.dirname(__file__), \"images/cheetah1.jpg\")\n\ndemo = gr.Interface(image_mod, gr.Image(type=\"pil\", value=cheetah), \"image\",\n flagging_options=[\"blurry\", \"incorrect\", \"other\"], examples=[\n os.path.join(os.path.dirname(__file__), \"images/lion.jpg\"),\n os.path.join(os.path.dirname(__file__), \"images/logo.png\")\n ])\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ] - ], - "guides": [ - { - "name": "image-classification-in-pytorch", - "category": "integrating-other-frameworks", - "pretty_category": "Integrating Other Frameworks", - "guide_index": null, - "absolute_index": 20, - "pretty_name": "Image Classification In Pytorch", - "content": "# Image Classification in PyTorch\n\n\n\n\n## Introduction\n\nImage classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from autonomous vehicles to medical imaging. \n\nSuch models are perfect to use with Gradio's *image* input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in Python, and it will look like this (try one of the examples!):\n\n\n\n\nLet's get started!\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). We will be using a pretrained image classification model, so you should also have `torch` installed.\n\n## Step 1 \u2014 Setting up the Image Classification Model\n\nFirst, we will need an image classification model. For this tutorial, we will use a pretrained Resnet-18 model, as it is easily downloadable from [PyTorch Hub](https://pytorch.org/hub/pytorch_vision_resnet/). You can use a different pretrained model or train your own. 
\n\n```python\nimport torch\n\nmodel = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()\n```\n\nBecause we will be using the model for inference, we have called the `.eval()` method.\n\n## Step 2 \u2014 Defining a `predict` function\n\nNext, we will need to define a function that takes in the *user input*, which in this case is an image, and returns the prediction. The prediction should be returned as a dictionary whose keys are class name and values are confidence probabilities. We will load the class names from this [text file](https://git.io/JJkYN).\n\nIn the case of our pretrained model, it will look like this:\n\n```python\nimport requests\nfrom PIL import Image\nfrom torchvision import transforms\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\ndef predict(inp):\n inp = transforms.ToTensor()(inp).unsqueeze(0)\n with torch.no_grad():\n prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)\n confidences = {labels[i]: float(prediction[i]) for i in range(1000)} \n return confidences\n```\n\nLet's break this down. The function takes one parameter:\n\n* `inp`: the input image as a `PIL` image\n\nThen, the function converts the image to a PIL Image and then eventually a PyTorch `tensor`, passes it through the model, and returns:\n\n* `confidences`: the predictions, as a dictionary whose keys are class labels and whose values are confidence probabilities\n\n## Step 3 \u2014 Creating a Gradio Interface\n\nNow that we have our predictive function set up, we can create a Gradio Interface around it. \n\nIn this case, the input component is a drag-and-drop image component. To create this input, we use `Image(type=\"pil\")` which creates the component and handles the preprocessing to convert that to a `PIL` image. \n\nThe output component will be a `Label`, which displays the top labels in a nice form. Since we don't want to show all 1,000 class labels, we will customize it to show only the top 3 images by constructing it as `Label(num_top_classes=3)`.\n\nFinally, we'll add one more parameter, the `examples`, which allows us to prepopulate our interfaces with a few predefined examples. The code for Gradio looks like this:\n\n```python\nimport gradio as gr\n\ngr.Interface(fn=predict, \n inputs=gr.Image(type=\"pil\"),\n outputs=gr.Label(num_top_classes=3),\n examples=[\"lion.jpg\", \"cheetah.jpg\"]).launch()\n```\n\nThis produces the following interface, which you can try right here in your browser (try uploading your own examples!):\n\n\n\n----------\n\nAnd you're done! That's all the code you need to build a web demo for an image classifier. If you'd like to share with others, try setting `share=True` when you `launch()` the Interface!\n\n", - "html": "

Image Classification in PyTorch

\n\n

Introduction

\n\n

Image classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from autonomous vehicles to medical imaging.

\n\n

Such models are perfect to use with Gradio's image input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in Python, and it will look like this (try one of the examples!):

\n\n\n\n

Let's get started!

\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed. We will be using a pretrained image classification model, so you should also have torch installed.

\n\n

Step 1 \u2014 Setting up the Image Classification Model

\n\n

First, we will need an image classification model. For this tutorial, we will use a pretrained Resnet-18 model, as it is easily downloadable from PyTorch Hub. You can use a different pretrained model or train your own.

\n\n
import torch\n\nmodel = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()\n
\n\n

Because we will be using the model for inference, we have called the .eval() method.

\n\n

Step 2 \u2014 Defining a predict function

\n\n

Next, we will need to define a function that takes in the user input, which in this case is an image, and returns the prediction. The prediction should be returned as a dictionary whose keys are class names and whose values are confidence probabilities. We will load the class names from this text file.

\n\n

In the case of our pretrained model, it will look like this:

\n\n
import requests\nfrom PIL import Image\nfrom torchvision import transforms\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\ndef predict(inp):\n  inp = transforms.ToTensor()(inp).unsqueeze(0)\n  with torch.no_grad():\n    prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)\n    confidences = {labels[i]: float(prediction[i]) for i in range(1000)}    \n  return confidences\n
\n\n

Let's break this down. The function takes one parameter:

\n\n
    \n
  • inp: the input image as a PIL image
\n\n

Then, the function converts the PIL image into a PyTorch tensor, passes it through the model, and returns:

\n\n
    \n
  • confidences: the predictions, as a dictionary whose keys are class labels and whose values are confidence probabilities
\n\n

Step 3 \u2014 Creating a Gradio Interface

\n\n

Now that we have our predictive function set up, we can create a Gradio Interface around it.

\n\n

In this case, the input component is a drag-and-drop image component. To create this input, we use Image(type=\"pil\") which creates the component and handles the preprocessing to convert that to a PIL image.

\n\n

The output component will be a Label, which displays the top labels in a nice form. Since we don't want to show all 1,000 class labels, we will customize it to show only the top 3 labels by constructing it as Label(num_top_classes=3).

\n\n

Finally, we'll add one more parameter, the examples, which allows us to prepopulate our interfaces with a few predefined examples. The code for Gradio looks like this:

\n\n
import gradio as gr\n\ngr.Interface(fn=predict, \n             inputs=gr.Image(type=\"pil\"),\n             outputs=gr.Label(num_top_classes=3),\n             examples=[\"lion.jpg\", \"cheetah.jpg\"]).launch()\n
\n\n

This produces the following interface, which you can try right here in your browser (try uploading your own examples!):

\n\n\n\n
\n\n

And you're done! That's all the code you need to build a web demo for an image classifier. If you'd like to share with others, try setting share=True when you launch() the Interface!

\n", - "tags": ["VISION", "RESNET", "PYTORCH"], - "spaces": [ - "https://huggingface.co/spaces/abidlabs/pytorch-image-classifier", - "https://huggingface.co/spaces/pytorch/ResNet", - "https://huggingface.co/spaces/pytorch/ResNext", - "https://huggingface.co/spaces/pytorch/SqueezeNet" - ], - "url": "/guides/image-classification-in-pytorch/", - "contributor": null - }, - { - "name": "image-classification-in-tensorflow", - "category": "integrating-other-frameworks", - "pretty_category": "Integrating Other Frameworks", - "guide_index": null, - "absolute_index": 21, - "pretty_name": "Image Classification In Tensorflow", - "content": "# Image Classification in TensorFlow and Keras\n\n\n\n\n## Introduction\n\nImage classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from traffic control systems to satellite imaging. \n\nSuch models are perfect to use with Gradio's *image* input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in Python, and it will look like this (try one of the examples!):\n\n\n\n\nLet's get started!\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). We will be using a pretrained Keras image classification model, so you should also have `tensorflow` installed.\n\n## Step 1 \u2014 Setting up the Image Classification Model\n\nFirst, we will need an image classification model. For this tutorial, we will use a pretrained Mobile Net model, as it is easily downloadable from [Keras](https://keras.io/api/applications/mobilenet/). You can use a different pretrained model or train your own. \n\n```python\nimport tensorflow as tf\n\ninception_net = tf.keras.applications.MobileNetV2()\n```\n\nThis line automatically downloads the MobileNet model and weights using the Keras library. \n\n## Step 2 \u2014 Defining a `predict` function\n\nNext, we will need to define a function that takes in the *user input*, which in this case is an image, and returns the prediction. The prediction should be returned as a dictionary whose keys are class name and values are confidence probabilities. We will load the class names from this [text file](https://git.io/JJkYN).\n\nIn the case of our pretrained model, it will look like this:\n\n```python\nimport requests\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\ndef classify_image(inp):\n inp = inp.reshape((-1, 224, 224, 3))\n inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)\n prediction = inception_net.predict(inp).flatten()\n confidences = {labels[i]: float(prediction[i]) for i in range(1000)}\n return confidences\n```\n\nLet's break this down. The function takes one parameter:\n\n* `inp`: the input image as a `numpy` array\n\nThen, the function adds a batch dimension, passes it through the model, and returns:\n\n* `confidences`: the predictions, as a dictionary whose keys are class labels and whose values are confidence probabilities\n\n## Step 3 \u2014 Creating a Gradio Interface\n\nNow that we have our predictive function set up, we can create a Gradio Interface around it. \n\nIn this case, the input component is a drag-and-drop image component. 
To create this input, we can use the `\"gradio.inputs.Image\"` class, which creates the component and handles the preprocessing to convert that to a numpy array. We will instantiate the class with a parameter that automatically preprocesses the input image to be 224 pixels by 224 pixels, which is the size that MobileNet expects.\n\nThe output component will be a `\"label\"`, which displays the top labels in a nice form. Since we don't want to show all 1,000 class labels, we will customize it to show only the top 3 images.\n\nFinally, we'll add one more parameter, the `examples`, which allows us to prepopulate our interfaces with a few predefined examples. The code for Gradio looks like this:\n\n```python\nimport gradio as gr\n\ngr.Interface(fn=classify_image, \n inputs=gr.Image(shape=(224, 224)),\n outputs=gr.Label(num_top_classes=3),\n examples=[\"banana.jpg\", \"car.jpg\"]).launch()\n```\n\nThis produces the following interface, which you can try right here in your browser (try uploading your own examples!):\n\n\n\n----------\n\nAnd you're done! That's all the code you need to build a web demo for an image classifier. If you'd like to share with others, try setting `share=True` when you `launch()` the Interface!\n\n", - "html": "

Image Classification in TensorFlow and Keras

\n\n

Introduction

\n\n

Image classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from traffic control systems to satellite imaging.

\n\n

Such models are perfect to use with Gradio's image input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in Python, and it will look like this (try one of the examples!):

\n\n\n\n

Let's get started!

\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed. We will be using a pretrained Keras image classification model, so you should also have tensorflow installed.

\n\n

Step 1 \u2014 Setting up the Image Classification Model

\n\n

First, we will need an image classification model. For this tutorial, we will use a pretrained Mobile Net model, as it is easily downloadable from Keras. You can use a different pretrained model or train your own.

\n\n
import tensorflow as tf\n\ninception_net = tf.keras.applications.MobileNetV2()\n
\n\n

This line automatically downloads the MobileNet model and weights using the Keras library.

\n\n

Step 2 \u2014 Defining a predict function

\n\n

Next, we will need to define a function that takes in the user input, which in this case is an image, and returns the prediction. The prediction should be returned as a dictionary whose keys are class names and whose values are confidence probabilities. We will load the class names from this text file.

\n\n

In the case of our pretrained model, it will look like this:

\n\n
import requests\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\ndef classify_image(inp):\n  inp = inp.reshape((-1, 224, 224, 3))\n  inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)\n  prediction = inception_net.predict(inp).flatten()\n  confidences = {labels[i]: float(prediction[i]) for i in range(1000)}\n  return confidences\n
\n\n

Let's break this down. The function takes one parameter:

\n\n
    \n
  • inp: the input image as a numpy array
\n\n

Then, the function adds a batch dimension, passes it through the model, and returns:

\n\n
    \n
  • confidences: the predictions, as a dictionary whose keys are class labels and whose values are confidence probabilities
\n\n

Step 3 \u2014 Creating a Gradio Interface

\n\n

Now that we have our predictive function set up, we can create a Gradio Interface around it.

\n\n

In this case, the input component is a drag-and-drop image component. To create this input, we can use the \"gradio.inputs.Image\" class, which creates the component and handles the preprocessing to convert that to a numpy array. We will instantiate the class with a parameter that automatically preprocesses the input image to be 224 pixels by 224 pixels, which is the size that MobileNet expects.

\n\n

The output component will be a \"label\", which displays the top labels in a nice form. Since we don't want to show all 1,000 class labels, we will customize it to show only the top 3 images.

\n\n

Finally, we'll add one more parameter, the examples, which allows us to prepopulate our interfaces with a few predefined examples. The code for Gradio looks like this:

\n\n
import gradio as gr\n\ngr.Interface(fn=classify_image, \n             inputs=gr.Image(shape=(224, 224)),\n             outputs=gr.Label(num_top_classes=3),\n             examples=[\"banana.jpg\", \"car.jpg\"]).launch()\n
\n\n

This produces the following interface, which you can try right here in your browser (try uploading your own examples!):

\n\n\n\n
\n\n

And you're done! That's all the code you need to build a web demo for an image classifier. If you'd like to share with others, try setting share=True when you launch() the Interface!

\n", - "tags": ["VISION", "MOBILENET", "TENSORFLOW"], - "spaces": [ - "https://huggingface.co/spaces/abidlabs/keras-image-classifier" - ], - "url": "/guides/image-classification-in-tensorflow/", - "contributor": null - }, - { - "name": "image-classification-with-vision-transformers", - "category": "integrating-other-frameworks", - "pretty_category": "Integrating Other Frameworks", - "guide_index": null, - "absolute_index": 22, - "pretty_name": "Image Classification With Vision Transformers", - "content": "# Image Classification with Vision Transformers\n\n\n\n\n## Introduction\n\nImage classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from facial recognition to manufacturing quality control. \n\nState-of-the-art image classifiers are based on the *transformers* architectures, originally popularized for NLP tasks. Such architectures are typically called vision transformers (ViT). Such models are perfect to use with Gradio's *image* input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in a **single line of Python**, and it will look like this (try one of the examples!):\n\n\n\n\nLet's get started!\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started).\n\n## Step 1 \u2014 Choosing a Vision Image Classification Model\n\nFirst, we will need an image classification model. For this tutorial, we will use a model from the [Hugging Face Model Hub](https://huggingface.co/models?pipeline_tag=image-classification). The Hub contains thousands of models covering dozens of different machine learning tasks. \n\nExpand the Tasks category on the left sidebar and select \"Image Classification\" as our task of interest. You will then see all of the models on the Hub that are designed to classify images.\n\nAt the time of writing, the most popular one is `google/vit-base-patch16-224`, which has been trained on ImageNet images at a resolution of 224x224 pixels. We will use this model for our demo. \n\n## Step 2 \u2014 Loading the Vision Transformer Model with Gradio\n\nWhen using a model from the Hugging Face Hub, we do not need to define the input or output components for the demo. Similarly, we do not need to be concerned with the details of preprocessing or postprocessing. \nAll of these are automatically inferred from the model tags.\n\nBesides the import statement, it only takes a single line of Python to load and launch the demo. \n\nWe use the `gr.Interface.load()` method and pass in the path to the model including the `huggingface/` to designate that it is from the Hugging Face Hub.\n\n```python\nimport gradio as gr\n\ngr.Interface.load(\n \"huggingface/google/vit-base-patch16-224\",\n examples=[\"alligator.jpg\", \"laptop.jpg\"]).launch()\n```\n\nNotice that we have added one more parameter, the `examples`, which allows us to prepopulate our interfaces with a few predefined examples. \n\nThis produces the following interface, which you can try right here in your browser. When you input an image, it is automatically preprocessed and sent to the Hugging Face Hub API, where it is passed through the model and returned as a human-interpretable prediction. Try uploading your own image!\n\n\n\n----------\n\nAnd you're done! In one line of code, you have built a web demo for an image classifier. 
If you'd like to share with others, try setting `share=True` when you `launch()` the Interface!\n\n", - "html": "

Image Classification with Vision Transformers

\n\n

Introduction

\n\n

Image classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from facial recognition to manufacturing quality control.

\n\n

State-of-the-art image classifiers are based on the transformers architectures, originally popularized for NLP tasks. Such architectures are typically called vision transformers (ViT). Such models are perfect to use with Gradio's image input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in a single line of Python, and it will look like this (try one of the examples!):

\n\n\n\n

Let's get started!

\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed.

\n\n

Step 1 \u2014 Choosing a Vision Image Classification Model

\n\n

First, we will need an image classification model. For this tutorial, we will use a model from the Hugging Face Model Hub. The Hub contains thousands of models covering dozens of different machine learning tasks.

\n\n

Expand the Tasks category on the left sidebar and select \"Image Classification\" as our task of interest. You will then see all of the models on the Hub that are designed to classify images.

\n\n

At the time of writing, the most popular one is google/vit-base-patch16-224, which has been trained on ImageNet images at a resolution of 224x224 pixels. We will use this model for our demo.

\n\n

Step 2 \u2014 Loading the Vision Transformer Model with Gradio

\n\n

When using a model from the Hugging Face Hub, we do not need to define the input or output components for the demo. Similarly, we do not need to be concerned with the details of preprocessing or postprocessing. \nAll of these are automatically inferred from the model tags.

\n\n

Besides the import statement, it only takes a single line of Python to load and launch the demo.

\n\n

We use the gr.Interface.load() method and pass in the path to the model including the huggingface/ to designate that it is from the Hugging Face Hub.

\n\n
import gradio as gr\n\ngr.Interface.load(\n             \"huggingface/google/vit-base-patch16-224\",\n             examples=[\"alligator.jpg\", \"laptop.jpg\"]).launch()\n
\n\n

Notice that we have added one more parameter, the examples, which allows us to prepopulate our interfaces with a few predefined examples.

\n\n

This produces the following interface, which you can try right here in your browser. When you input an image, it is automatically preprocessed and sent to the Hugging Face Hub API, where it is passed through the model and returned as a human-interpretable prediction. Try uploading your own image!

\n\n\n\n
\n\n

And you're done! In one line of code, you have built a web demo for an image classifier. If you'd like to share with others, try setting share=True when you launch() the Interface!

\n", - "tags": ["VISION", "TRANSFORMERS", "HUB"], - "spaces": [ - "https://huggingface.co/spaces/abidlabs/vision-transformer" - ], - "url": "/guides/image-classification-with-vision-transformers/", - "contributor": null - }, - { - "name": "create-your-own-friends-with-a-gan", - "category": "other-tutorials", - "pretty_category": "Other Tutorials", - "guide_index": null, - "absolute_index": 34, - "pretty_name": "Create Your Own Friends With A Gan", - "content": "# Create Your Own Friends with a GAN\n\n\n\n\n\n\n\n## Introduction\n\nIt seems that cryptocurrencies, [NFTs](https://www.nytimes.com/interactive/2022/03/18/technology/nft-guide.html), and the web3 movement are all the rage these days! Digital assets are being listed on marketplaces for astounding amounts of money, and just about every celebrity is debuting their own NFT collection. While your crypto assets [may be taxable, such as in Canada](https://www.canada.ca/en/revenue-agency/programs/about-canada-revenue-agency-cra/compliance/digital-currency/cryptocurrency-guide.html), today we'll explore some fun and tax-free ways to generate your own assortment of procedurally generated [CryptoPunks](https://www.larvalabs.com/cryptopunks).\n\nGenerative Adversarial Networks, often known just as *GANs*, are a specific class of deep-learning models that are designed to learn from an input dataset to create (*generate!*) new material that is convincingly similar to elements of the original training set. Famously, the website [thispersondoesnotexist.com](https://thispersondoesnotexist.com/) went viral with lifelike, yet synthetic, images of people generated with a model called StyleGAN2. GANs have gained traction in the machine learning world, and are now being used to generate all sorts of images, text, and even [music](https://salu133445.github.io/musegan/)!\n\nToday we'll briefly look at the high-level intuition behind GANs, and then we'll build a small demo around a pre-trained GAN to see what all the fuss is about. Here's a peek at what we're going to be putting together:\n\n\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). To use the pretrained model, also install `torch` and `torchvision`.\n\n## GANs: a very brief introduction\n\nOriginally proposed in [Goodfellow et al. 2014](https://arxiv.org/abs/1406.2661), GANs are made up of neural networks which compete with the intention of outsmarting each other. One network, known as the *generator*, is responsible for generating images. The other network, the *discriminator*, receives an image at a time from the generator along with a **real** image from the training data set. The discriminator then has to guess: which image is the fake?\n\nThe generator is constantly training to create images which are trickier for the discriminator to identify, while the discriminator raises the bar for the generator every time it correctly detects a fake. As the networks engage in this competitive (*adversarial!*) relationship, the images that get generated improve to the point where they become indistinguishable to human eyes!\n\nFor a more in-depth look at GANs, you can take a look at [this excellent post on Analytics Vidhya](https://www.analyticsvidhya.com/blog/2021/06/a-detailed-explanation-of-gan-with-implementation-using-tensorflow-and-keras/) or this [PyTorch tutorial](https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html). 
For now, though, we'll dive into a demo!\n\n## Step 1 \u2014 Create the Generator model\n\nTo generate new images with a GAN, you only need the generator model. There are many different architectures that the generator could use, but for this demo we'll use a pretrained GAN generator model with the following architecture:\n\n```python\nfrom torch import nn\n\nclass Generator(nn.Module):\n # Refer to the link below for explanations about nc, nz, and ngf\n # https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html#inputs\n def __init__(self, nc=4, nz=100, ngf=64):\n super(Generator, self).__init__()\n self.network = nn.Sequential(\n nn.ConvTranspose2d(nz, ngf * 4, 3, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 0, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n nn.Tanh(),\n )\n\n def forward(self, input):\n output = self.network(input)\n return output\n```\n\nWe're taking the generator from [this repo by @teddykoker](https://github.com/teddykoker/cryptopunks-gan/blob/main/train.py#L90), where you can also see the original discriminator model structure.\n\nAfter instantiating the model, we'll load in the weights from the Hugging Face Hub, stored at [nateraw/cryptopunks-gan](https://huggingface.co/nateraw/cryptopunks-gan):\n\n```python\nfrom huggingface_hub import hf_hub_download\nimport torch\n\nmodel = Generator()\nweights_path = hf_hub_download('nateraw/cryptopunks-gan', 'generator.pth')\nmodel.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))) # Use 'cuda' if you have a GPU available\n```\n\n## Step 2 \u2014 Defining a `predict` function\n\nThe `predict` function is the key to making Gradio work! Whatever inputs we choose through the Gradio interface will get passed through our `predict` function, which should operate on the inputs and generate outputs that we can display with Gradio output components. For GANs it's common to pass random noise into our model as the input, so we'll generate a tensor of random numbers and pass that through the model. We can then use `torchvision`'s `save_image` function to save the output of the model as a `png` file, and return the file name:\n\n```python\nfrom torchvision.utils import save_image\n\ndef predict(seed):\n num_punks = 4\n torch.manual_seed(seed)\n z = torch.randn(num_punks, 100, 1, 1)\n punks = model(z)\n save_image(punks, \"punks.png\", normalize=True)\n return 'punks.png'\n```\n\nWe're giving our `predict` function a `seed` parameter, so that we can fix the random tensor generation with a seed. We'll then be able to reproduce punks if we want to see them again by passing in the same seed.\n\n*Note!* Our model needs an input tensor of dimensions 100x1x1 to do a single inference, or (BatchSize)x100x1x1 for generating a batch of images. In this demo we'll start by generating 4 punks at a time.\n\n## Step 3 \u2014 Creating a Gradio interface\n\nAt this point you can even run the code you have with `predict()`, and you'll find your freshly generated punks in your file system at `./punks.png`. To make a truly interactive demo, though, we'll build out a simple interface with Gradio. 
Our goals here are to:\n\n* Set a slider input so users can choose the \"seed\" value\n* Use an image component for our output to showcase the generated punks\n* Use our `predict()` to take the seed and generate the images\n\nWith `gr.Interface()`, we can define all of that with a single function call:\n\n```python\nimport gradio as gr\n\ngr.Interface(\n predict,\n inputs=[\n gr.Slider(0, 1000, label='Seed', default=42),\n ],\n outputs=\"image\",\n).launch()\n```\n\nLaunching the interface should present you with something like this:\n\n\n\n## Step 4 \u2014 Even more punks!\n\nGenerating 4 punks at a time is a good start, but maybe we'd like to control how many we want to make each time. Adding more inputs to our Gradio interface is as simple as adding another item to the `inputs` list that we pass to `gr.Interface`:\n\n```python\ngr.Interface(\n predict,\n inputs=[\n gr.Slider(0, 1000, label='Seed', default=42),\n gr.Slider(4, 64, label='Number of Punks', step=1, default=10), # Adding another slider!\n ],\n outputs=\"image\",\n).launch()\n```\n\nThe new input will be passed to our `predict()` function, so we have to make some changes to that function to accept a new parameter:\n\n```python\ndef predict(seed, num_punks):\n torch.manual_seed(seed)\n z = torch.randn(num_punks, 100, 1, 1)\n punks = model(z)\n save_image(punks, \"punks.png\", normalize=True)\n return 'punks.png'\n```\n\nWhen you relaunch your interface, you should see a second slider that'll let you control the number of punks!\n\n## Step 5 - Polishing it up\n\nYour Gradio app is pretty much good to go, but you can add a few extra things to really make it ready for the spotlight \u2728\n\nWe can add some examples that users can easily try out by adding this to the `gr.Interface`:\n\n```python\ngr.Interface(\n # ...\n # keep everything as it is, and then add\n examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],\n).launch(cache_examples=True) # cache_examples is optional\n```\n\nThe `examples` parameter takes a list of lists, where each item in the sublists is ordered in the same order that we've listed the `inputs`. So in our case, `[seed, num_punks]`. Give it a try!\n\nYou can also try adding a `title`, `description`, and `article` to the `gr.Interface`. 
Each of those parameters accepts a string, so try it out and see what happens \ud83d\udc40 `article` will also accept HTML, as [explored in a previous guide](/guides/key-features/#descriptive-content)!\n\nWhen you're all done, you may end up with something like this:\n\n\n\nFor reference, here is our full code:\n\n```python\nimport torch\nfrom torch import nn\nfrom huggingface_hub import hf_hub_download\nfrom torchvision.utils import save_image\nimport gradio as gr\n\nclass Generator(nn.Module):\n # Refer to the link below for explanations about nc, nz, and ngf\n # https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html#inputs\n def __init__(self, nc=4, nz=100, ngf=64):\n super(Generator, self).__init__()\n self.network = nn.Sequential(\n nn.ConvTranspose2d(nz, ngf * 4, 3, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 0, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n nn.Tanh(),\n )\n\n def forward(self, input):\n output = self.network(input)\n return output\n\nmodel = Generator()\nweights_path = hf_hub_download('nateraw/cryptopunks-gan', 'generator.pth')\nmodel.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))) # Use 'cuda' if you have a GPU available\n\ndef predict(seed, num_punks):\n torch.manual_seed(seed)\n z = torch.randn(num_punks, 100, 1, 1)\n punks = model(z)\n save_image(punks, \"punks.png\", normalize=True)\n return 'punks.png'\n\ngr.Interface(\n predict,\n inputs=[\n gr.Slider(0, 1000, label='Seed', default=42),\n gr.Slider(4, 64, label='Number of Punks', step=1, default=10),\n ],\n outputs=\"image\",\n examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],\n).launch(cache_examples=True)\n```\n----------\n\nCongratulations! You've built out your very own GAN-powered CryptoPunks generator, with a fancy Gradio interface that makes it easy for anyone to use. Now you can [scour the Hub for more GANs](https://huggingface.co/models?other=gan) (or train your own) and continue making even more awesome demos \ud83e\udd17", - "html": "

Create Your Own Friends with a GAN

\n\n

Introduction

\n\n

It seems that cryptocurrencies, NFTs, and the web3 movement are all the rage these days! Digital assets are being listed on marketplaces for astounding amounts of money, and just about every celebrity is debuting their own NFT collection. While your crypto assets may be taxable, such as in Canada, today we'll explore some fun and tax-free ways to generate your own assortment of procedurally generated CryptoPunks.

\n\n

Generative Adversarial Networks, often known just as GANs, are a specific class of deep-learning models that are designed to learn from an input dataset to create (generate!) new material that is convincingly similar to elements of the original training set. Famously, the website thispersondoesnotexist.com went viral with lifelike, yet synthetic, images of people generated with a model called StyleGAN2. GANs have gained traction in the machine learning world, and are now being used to generate all sorts of images, text, and even music!

\n\n

Today we'll briefly look at the high-level intuition behind GANs, and then we'll build a small demo around a pre-trained GAN to see what all the fuss is about. Here's a peek at what we're going to be putting together:

\n\n\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed. To use the pretrained model, also install torch and torchvision.

\n\n

GANs: a very brief introduction

\n\n

Originally proposed in Goodfellow et al. 2014, GANs are made up of neural networks which compete with the intention of outsmarting each other. One network, known as the generator, is responsible for generating images. The other network, the discriminator, receives an image at a time from the generator along with a real image from the training data set. The discriminator then has to guess: which image is the fake?

\n\n

The generator is constantly training to create images which are trickier for the discriminator to identify, while the discriminator raises the bar for the generator every time it correctly detects a fake. As the networks engage in this competitive (adversarial!) relationship, the images that get generated improve to the point where they become indistinguishable to human eyes!

\n\n

For a more in-depth look at GANs, you can take a look at this excellent post on Analytics Vidhya or this PyTorch tutorial. For now, though, we'll dive into a demo!

\n\n

Step 1 \u2014 Create the Generator model

\n\n

To generate new images with a GAN, you only need the generator model. There are many different architectures that the generator could use, but for this demo we'll use a pretrained GAN generator model with the following architecture:

\n\n
from torch import nn\n\nclass Generator(nn.Module):\n    # Refer to the link below for explanations about nc, nz, and ngf\n    # https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html#inputs\n    def __init__(self, nc=4, nz=100, ngf=64):\n        super(Generator, self).__init__()\n        self.network = nn.Sequential(\n            nn.ConvTranspose2d(nz, ngf * 4, 3, 1, 0, bias=False),\n            nn.BatchNorm2d(ngf * 4),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False),\n            nn.BatchNorm2d(ngf * 2),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 0, bias=False),\n            nn.BatchNorm2d(ngf),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n            nn.Tanh(),\n        )\n\n    def forward(self, input):\n        output = self.network(input)\n        return output\n
\n\n

We're taking the generator from this repo by @teddykoker, where you can also see the original discriminator model structure.

\n\n

After instantiating the model, we'll load in the weights from the Hugging Face Hub, stored at nateraw/cryptopunks-gan:

\n\n
from huggingface_hub import hf_hub_download\nimport torch\n\nmodel = Generator()\nweights_path = hf_hub_download('nateraw/cryptopunks-gan', 'generator.pth')\nmodel.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))) # Use 'cuda' if you have a GPU available\n
\n\n

Step 2 \u2014 Defining a predict function

\n\n

The predict function is the key to making Gradio work! Whatever inputs we choose through the Gradio interface will get passed through our predict function, which should operate on the inputs and generate outputs that we can display with Gradio output components. For GANs it's common to pass random noise into our model as the input, so we'll generate a tensor of random numbers and pass that through the model. We can then use torchvision's save_image function to save the output of the model as a png file, and return the file name:

\n\n
from torchvision.utils import save_image\n\ndef predict(seed):\n    num_punks = 4\n    torch.manual_seed(seed)\n    z = torch.randn(num_punks, 100, 1, 1)\n    punks = model(z)\n    save_image(punks, \"punks.png\", normalize=True)\n    return 'punks.png'\n
\n\n

We're giving our predict function a seed parameter, so that we can fix the random tensor generation with a seed. We'll then be able to reproduce punks if we want to see them again by passing in the same seed.

\n\n

Note! Our model needs an input tensor of dimensions 100x1x1 to do a single inference, or (BatchSize)x100x1x1 for generating a batch of images. In this demo we'll start by generating 4 punks at a time.

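To make that shape convention concrete, here is a small illustrative sketch (assuming only `torch`; the `model` from the earlier steps is referenced in a comment). It shows how the seed fixes the noise and how the batch dimension controls how many punks are generated at once; the variable names are made up for illustration.

```python
import torch

# The generator expects noise of shape (batch_size, 100, 1, 1).
torch.manual_seed(42)
single = torch.randn(1, 100, 1, 1)   # noise for a single punk
batch = torch.randn(8, 100, 1, 1)    # noise for eight punks in one forward pass

# Re-seeding with the same value resets the RNG, so the first draw is reproduced exactly.
torch.manual_seed(42)
assert torch.equal(torch.randn(1, 100, 1, 1), single)

# punks = model(batch)  # uncomment once `model` from the steps above is in scope
```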
\n\n

Step 3 \u2014 Creating a Gradio interface

\n\n

At this point you can even run the code you have with predict(<SOME_NUMBER>), and you'll find your freshly generated punks in your file system at ./punks.png. To make a truly interactive demo, though, we'll build out a simple interface with Gradio. Our goals here are to:

\n\n
    \n
  • Set a slider input so users can choose the \"seed\" value
  • Use an image component for our output to showcase the generated punks
  • Use our predict() to take the seed and generate the images
\n\n

With gr.Interface(), we can define all of that with a single function call:

\n\n
import gradio as gr\n\ngr.Interface(\n    predict,\n    inputs=[\n        gr.Slider(0, 1000, label='Seed', default=42),\n    ],\n    outputs=\"image\",\n).launch()\n
\n\n

Launching the interface should present you with something like this:

\n\n\n\n

Step 4 \u2014 Even more punks!

\n\n

Generating 4 punks at a time is a good start, but maybe we'd like to control how many we want to make each time. Adding more inputs to our Gradio interface is as simple as adding another item to the inputs list that we pass to gr.Interface:

\n\n
gr.Interface(\n    predict,\n    inputs=[\n        gr.Slider(0, 1000, label='Seed', default=42),\n        gr.Slider(4, 64, label='Number of Punks', step=1, default=10), # Adding another slider!\n    ],\n    outputs=\"image\",\n).launch()\n
\n\n

The new input will be passed to our predict() function, so we have to make some changes to that function to accept a new parameter:

\n\n
def predict(seed, num_punks):\n    torch.manual_seed(seed)\n    z = torch.randn(num_punks, 100, 1, 1)\n    punks = model(z)\n    save_image(punks, \"punks.png\", normalize=True)\n    return 'punks.png'\n
\n\n

When you relaunch your interface, you should see a second slider that'll let you control the number of punks!

\n\n

Step 5 - Polishing it up

\n\n

Your Gradio app is pretty much good to go, but you can add a few extra things to really make it ready for the spotlight \u2728

\n\n

We can add some examples that users can easily try out by adding this to the gr.Interface:

\n\n
gr.Interface(\n    # ...\n    # keep everything as it is, and then add\n    examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],\n).launch(cache_examples=True) # cache_examples is optional\n
\n\n

The examples parameter takes a list of lists, where each item in the sublists is ordered in the same order that we've listed the inputs. So in our case, [seed, num_punks]. Give it a try!

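As a purely illustrative sketch of that positional mapping (the numbers are simply the ones used above), each inner list supplies one value per input component, in the same order as `inputs`:

```python
# Each inner list lines up positionally with `inputs`, i.e. [seed, num_punks]
examples = [
    [123, 15],   # seed=123, num_punks=15
    [42, 29],    # seed=42,  num_punks=29
]
```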
\n\n

You can also try adding a title, description, and article to the gr.Interface. Each of those parameters accepts a string, so try it out and see what happens \ud83d\udc40 article will also accept HTML, as explored in a previous guide!

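A minimal sketch of what that polish might look like, reusing the `predict` function and sliders from the steps above; the title, description, and article strings here are made up for illustration, and `article` is where HTML is accepted:

```python
gr.Interface(
    predict,
    inputs=[
        gr.Slider(0, 1000, label='Seed', default=42),
        gr.Slider(4, 64, label='Number of Punks', step=1, default=10),
    ],
    outputs="image",
    examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],
    title="CryptoPunks GAN",                                            # hypothetical title
    description="Generate your own punks from a random seed.",          # hypothetical description
    article="<p>Powered by a pretrained GAN from the Hugging Face Hub.</p>",  # article accepts HTML
).launch(cache_examples=True)
```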
\n\n

When you're all done, you may end up with something like this:

\n\n\n\n

For reference, here is our full code:

\n\n
import torch\nfrom torch import nn\nfrom huggingface_hub import hf_hub_download\nfrom torchvision.utils import save_image\nimport gradio as gr\n\nclass Generator(nn.Module):\n    # Refer to the link below for explanations about nc, nz, and ngf\n    # https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html#inputs\n    def __init__(self, nc=4, nz=100, ngf=64):\n        super(Generator, self).__init__()\n        self.network = nn.Sequential(\n            nn.ConvTranspose2d(nz, ngf * 4, 3, 1, 0, bias=False),\n            nn.BatchNorm2d(ngf * 4),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False),\n            nn.BatchNorm2d(ngf * 2),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 0, bias=False),\n            nn.BatchNorm2d(ngf),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n            nn.Tanh(),\n        )\n\n    def forward(self, input):\n        output = self.network(input)\n        return output\n\nmodel = Generator()\nweights_path = hf_hub_download('nateraw/cryptopunks-gan', 'generator.pth')\nmodel.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))) # Use 'cuda' if you have a GPU available\n\ndef predict(seed, num_punks):\n    torch.manual_seed(seed)\n    z = torch.randn(num_punks, 100, 1, 1)\n    punks = model(z)\n    save_image(punks, \"punks.png\", normalize=True)\n    return 'punks.png'\n\ngr.Interface(\n    predict,\n    inputs=[\n        gr.Slider(0, 1000, label='Seed', default=42),\n        gr.Slider(4, 64, label='Number of Punks', step=1, default=10),\n    ],\n    outputs=\"image\",\n    examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],\n).launch(cache_examples=True)\n
\n\n
\n\n

Congratulations! You've built out your very own GAN-powered CryptoPunks generator, with a fancy Gradio interface that makes it easy for anyone to use. Now you can scour the Hub for more GANs (or train your own) and continue making even more awesome demos \ud83e\udd17

\n", - "tags": ["GAN", "IMAGE", "HUB"], - "spaces": [ - "https://huggingface.co/spaces/NimaBoscarino/cryptopunks", - "https://huggingface.co/spaces/nateraw/cryptopunks-generator" - ], - "url": "/guides/create-your-own-friends-with-a-gan/", - "contributor": "Nima Boscarino and Nate Raw" - } - ], - "preprocessing": "passes the uploaded image as a numpy.array, PIL.Image or str filepath depending on `type` -- unless `tool` is `sketch` AND source is one of `upload` or `webcam`. In these cases, a dict with keys `image` and `mask` is passed, and the format of the corresponding values depends on `type`.", - "postprocessing": "expects a numpy.array, PIL.Image or str or pathlib.Path filepath to an image and displays the image.", - "examples-format": "a str filepath to a local file that contains the image.", - "parent": "gradio", - "prev_obj": "HighlightedText", - "next_obj": "Interpretation" - }, - "interpretation": { - "class": null, - "name": "Interpretation", - "description": "Used to create an interpretation widget for a component.
", - "tags": { - "preprocessing": "this component does *not* accept input.", - "postprocessing": "expects a {dict} with keys \"original\" and \"interpretation\".", - "guides": "custom-interpretations-with-blocks" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "component", - "annotation": "Component", - "doc": "Which component to show in the interpretation widget." - }, - { - "name": "visible", - "annotation": "bool", - "doc": "Whether or not the interpretation is visible.", - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "elem_classes", - "annotation": "list[str] | str | None", - "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [], - "string_shortcuts": [ - ["Interpretation", "interpretation", "Uses default values"] - ], - "guides": [ - { - "name": "custom-interpretations-with-blocks", - "category": "other-tutorials", - "pretty_category": "Other Tutorials", - "guide_index": null, - "absolute_index": 36, - "pretty_name": "Custom Interpretations With Blocks", - "content": "# Custom Machine Learning Interpretations with Blocks\n\n\n**Prerequisite**: This Guide requires you to know about Blocks and the interpretation feature of Interfaces.\nMake sure to [read the Guide to Blocks first](https://gradio.app/guides/quickstart/#blocks-more-flexibility-and-control) as well as the\ninterpretation section of the [Advanced Interface Features Guide](/advanced-interface-features#interpreting-your-predictions).\n\n## Introduction\n\nIf you have experience working with the Interface class, then you know that interpreting the prediction of your machine learning model\nis as easy as setting the `interpretation` parameter to either \"default\" or \"shap\".\n\nYou may be wondering if it is possible to add the same interpretation functionality to an app built with the Blocks API.\nNot only is it possible, but the flexibility of Blocks lets you display the interpretation output in ways that are\nimpossible to do with Interfaces!\n\nThis guide will show how to:\n\n1. Recreate the behavior of Interfaces's interpretation feature in a Blocks app.\n2. 
Customize how interpretations are displayed in a Blocks app.\n\nLet's get started!\n\n## Setting up the Blocks app\n\nLet's build a sentiment classification app with the Blocks API.\nThis app will take text as input and output the probability that this text expresses either negative or positive sentiment.\nWe'll have a single input `Textbox` and a single output `Label` component.\nBelow is the code for the app as well as the app itself.\n\n```python\nimport gradio as gr \nfrom transformers import pipeline\n\nsentiment_classifier = pipeline(\"text-classification\", return_all_scores=True)\n\ndef classifier(text):\n pred = sentiment_classifier(text)\n return {p[\"label\"]: p[\"score\"] for p in pred[0]}\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n input_text = gr.Textbox(label=\"Input Text\")\n with gr.Row():\n classify = gr.Button(\"Classify Sentiment\")\n with gr.Column():\n label = gr.Label(label=\"Predicted Sentiment\")\n\n classify.click(classifier, input_text, label)\ndemo.launch()\n```\n\n \n\n## Adding interpretations to the app\n\nOur goal is to present to our users how the words in the input contribute to the model's prediction.\nThis will help our users understand how the model works and also evaluate its effectiveness.\nFor example, we should expect our model to identify the words \"happy\" and \"love\" with positive sentiment - if not it's a sign we made a mistake in training it!\n\nFor each word in the input, we will compute a score of how much the model's prediction of positive sentiment is changed by that word.\nOnce we have those `(word, score)` pairs we can use gradio to visualize them for the user.\n\nThe [shap](https://shap.readthedocs.io/en/stable/index.html) library will help us compute the `(word, score)` pairs and\ngradio will take care of displaying the output to the user.\n\nThe following code computes the `(word, score)` pairs:\n\n```python\ndef interpretation_function(text):\n explainer = shap.Explainer(sentiment_classifier)\n shap_values = explainer([text])\n \n # Dimensions are (batch size, text size, number of classes)\n # Since we care about positive sentiment, use index 1\n scores = list(zip(shap_values.data[0], shap_values.values[0, :, 1]))\n # Scores contains (word, score) pairs\n \n \n # Format expected by gr.components.Interpretation\n return {\"original\": text, \"interpretation\": scores}\n```\n\nNow, all we have to do is add a button that runs this function when clicked.\nTo display the interpretation, we will use `gr.components.Interpretation`.\nThis will color each word in the input either red or blue.\nRed if it contributes to positive sentiment and blue if it contributes to negative sentiment.\nThis is how `Interface` displays the interpretation output for text.\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n input_text = gr.Textbox(label=\"Input Text\")\n with gr.Row():\n classify = gr.Button(\"Classify Sentiment\")\n interpret = gr.Button(\"Interpret\")\n with gr.Column():\n label = gr.Label(label=\"Predicted Sentiment\")\n with gr.Column():\n interpretation = gr.components.Interpretation(input_text)\n classify.click(classifier, input_text, label)\n interpret.click(interpretation_function, input_text, interpretation)\n\ndemo.launch()\n```\n\n \n\n\n## Customizing how the interpretation is displayed\n\nThe `gr.components.Interpretation` component does a good job of showing how individual words contribute to the sentiment prediction,\nbut what if we also wanted to display the score themselves 
along with the words?\n\nOne way to do this would be to generate a bar plot where the words are on the horizontal axis and the bar height corresponds\nto the shap score.\n\nWe can do this by modifying our `interpretation_function` to additionally return a matplotlib bar plot.\nWe will display it with the `gr.Plot` component in a separate tab.\n\nThis is how the interpretation function will look:\n```python\ndef interpretation_function(text):\n explainer = shap.Explainer(sentiment_classifier)\n shap_values = explainer([text])\n # Dimensions are (batch size, text size, number of classes)\n # Since we care about positive sentiment, use index 1\n scores = list(zip(shap_values.data[0], shap_values.values[0, :, 1]))\n\n scores_desc = sorted(scores, key=lambda t: t[1])[::-1]\n\n # Filter out empty string added by shap\n scores_desc = [t for t in scores_desc if t[0] != \"\"]\n\n fig_m = plt.figure()\n \n # Select top 5 words that contribute to positive sentiment\n plt.bar(x=[s[0] for s in scores_desc[:5]],\n height=[s[1] for s in scores_desc[:5]])\n plt.title(\"Top words contributing to positive sentiment\")\n plt.ylabel(\"Shap Value\")\n plt.xlabel(\"Word\")\n return {\"original\": text, \"interpretation\": scores}, fig_m\n```\n\nAnd this is how the app code will look:\n```python\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n input_text = gr.Textbox(label=\"Input Text\")\n with gr.Row():\n classify = gr.Button(\"Classify Sentiment\")\n interpret = gr.Button(\"Interpret\")\n with gr.Column():\n label = gr.Label(label=\"Predicted Sentiment\")\n with gr.Column():\n with gr.Tabs():\n with gr.TabItem(\"Display interpretation with built-in component\"):\n interpretation = gr.components.Interpretation(input_text)\n with gr.TabItem(\"Display interpretation with plot\"):\n interpretation_plot = gr.Plot()\n\n classify.click(classifier, input_text, label)\n interpret.click(interpretation_function, input_text, [interpretation, interpretation_plot])\n\ndemo.launch()\n```\n\nYou can see the demo below!\n\n \n\n## Beyond Sentiment Classification\nAlthough we have focused on sentiment classification so far, you can add interpretations to almost any machine learning model.\nThe output must be an `gr.Image` or `gr.Label` but the input can be almost anything (`gr.Number`, `gr.Slider`, `gr.Radio`, `gr.Image`).\n\nHere is a demo built with blocks of interpretations for an image classification model:\n\n \n\n\n## Closing remarks\n\nWe did a deep dive \ud83e\udd3f into how interpretations work and how you can add them to your Blocks app.\n\nWe also showed how the Blocks API gives you the power to control how the interpretation is visualized in your app.\n\nAdding interpretations is a helpful way to make your users understand and gain trust in your model.\nNow you have all the tools you need to add them to all of your apps!\n", - "html": "

Custom Machine Learning Interpretations with Blocks

\n\n

Prerequisite: This Guide requires you to know about Blocks and the interpretation feature of Interfaces.\nMake sure to read the Guide to Blocks first as well as the\ninterpretation section of the Advanced Interface Features Guide.

\n\n

Introduction

\n\n

If you have experience working with the Interface class, then you know that interpreting the prediction of your machine learning model\nis as easy as setting the interpretation parameter to either \"default\" or \"shap\".

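For reference, a minimal sketch of that Interface-level feature, using a stand-in classifier rather than the sentiment pipeline defined later in this guide; the Blocks-based approach that this guide focuses on is covered below.

```python
import gradio as gr

def classifier(text):
    # Stand-in for the sentiment pipeline defined later in this guide.
    return {"POSITIVE": 0.9, "NEGATIVE": 0.1}

gr.Interface(
    fn=classifier,
    inputs="text",
    outputs="label",
    interpretation="default",  # or "shap" (requires the shap package)
).launch()
```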
\n\n

You may be wondering if it is possible to add the same interpretation functionality to an app built with the Blocks API.\nNot only is it possible, but the flexibility of Blocks lets you display the interpretation output in ways that are\nimpossible to do with Interfaces!

\n\n

This guide will show how to:

\n\n
    \n
  1. Recreate the behavior of the Interface's interpretation feature in a Blocks app.
  2. Customize how interpretations are displayed in a Blocks app.
\n\n

Let's get started!

\n\n

Setting up the Blocks app

\n\n

Let's build a sentiment classification app with the Blocks API.\nThis app will take text as input and output the probability that this text expresses either negative or positive sentiment.\nWe'll have a single input Textbox and a single output Label component.\nBelow is the code for the app as well as the app itself.

\n\n
import gradio as gr \nfrom transformers import pipeline\n\nsentiment_classifier = pipeline(\"text-classification\", return_all_scores=True)\n\ndef classifier(text):\n    pred = sentiment_classifier(text)\n    return {p[\"label\"]: p[\"score\"] for p in pred[0]}\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        with gr.Column():\n            input_text = gr.Textbox(label=\"Input Text\")\n            with gr.Row():\n                classify = gr.Button(\"Classify Sentiment\")\n        with gr.Column():\n            label = gr.Label(label=\"Predicted Sentiment\")\n\n    classify.click(classifier, input_text, label)\ndemo.launch()\n
\n\n

\n\n

Adding interpretations to the app

\n\n

Our goal is to present to our users how the words in the input contribute to the model's prediction.\nThis will help our users understand how the model works and also evaluate its effectiveness.\nFor example, we should expect our model to identify the words \"happy\" and \"love\" with positive sentiment - if not it's a sign we made a mistake in training it!

\n\n

For each word in the input, we will compute a score of how much the model's prediction of positive sentiment is changed by that word.\nOnce we have those (word, score) pairs we can use gradio to visualize them for the user.

\n\n

The shap library will help us compute the (word, score) pairs and\ngradio will take care of displaying the output to the user.

\n\n

The following code computes the (word, score) pairs:

\n\n
def interpretation_function(text):\n    explainer = shap.Explainer(sentiment_classifier)\n    shap_values = explainer([text])\n\n    # Dimensions are (batch size, text size, number of classes)\n    # Since we care about positive sentiment, use index 1\n    scores = list(zip(shap_values.data[0], shap_values.values[0, :, 1]))\n    # Scores contains (word, score) pairs\n\n\n    # Format expected by gr.components.Interpretation\n    return {\"original\": text, \"interpretation\": scores}\n
\n\n

Now, all we have to do is add a button that runs this function when clicked.\nTo display the interpretation, we will use gr.components.Interpretation.\nThis will color each word in the input either red or blue.\nRed if it contributes to positive sentiment and blue if it contributes to negative sentiment.\nThis is how Interface displays the interpretation output for text.

\n\n
with gr.Blocks() as demo:\n    with gr.Row():\n        with gr.Column():\n            input_text = gr.Textbox(label=\"Input Text\")\n            with gr.Row():\n                classify = gr.Button(\"Classify Sentiment\")\n                interpret = gr.Button(\"Interpret\")\n        with gr.Column():\n            label = gr.Label(label=\"Predicted Sentiment\")\n        with gr.Column():\n            interpretation = gr.components.Interpretation(input_text)\n    classify.click(classifier, input_text, label)\n    interpret.click(interpretation_function, input_text, interpretation)\n\ndemo.launch()\n
\n\n

\n\n

Customizing how the interpretation is displayed

\n\n

The gr.components.Interpretation component does a good job of showing how individual words contribute to the sentiment prediction,\nbut what if we also wanted to display the scores themselves along with the words?

\n\n

One way to do this would be to generate a bar plot where the words are on the horizontal axis and the bar height corresponds\nto the shap score.

\n\n

We can do this by modifying our interpretation_function to additionally return a matplotlib bar plot.\nWe will display it with the gr.Plot component in a separate tab.

\n\n

This is how the interpretation function will look:

\n\n
def interpretation_function(text):\n    explainer = shap.Explainer(sentiment_classifier)\n    shap_values = explainer([text])\n    # Dimensions are (batch size, text size, number of classes)\n    # Since we care about positive sentiment, use index 1\n    scores = list(zip(shap_values.data[0], shap_values.values[0, :, 1]))\n\n    scores_desc = sorted(scores, key=lambda t: t[1])[::-1]\n\n    # Filter out empty string added by shap\n    scores_desc = [t for t in scores_desc if t[0] != \"\"]\n\n    fig_m = plt.figure()\n\n    # Select top 5 words that contribute to positive sentiment\n    plt.bar(x=[s[0] for s in scores_desc[:5]],\n            height=[s[1] for s in scores_desc[:5]])\n    plt.title(\"Top words contributing to positive sentiment\")\n    plt.ylabel(\"Shap Value\")\n    plt.xlabel(\"Word\")\n    return {\"original\": text, \"interpretation\": scores}, fig_m\n
\n\n

And this is how the app code will look:

\n\n
with gr.Blocks() as demo:\n    with gr.Row():\n        with gr.Column():\n            input_text = gr.Textbox(label=\"Input Text\")\n            with gr.Row():\n                classify = gr.Button(\"Classify Sentiment\")\n                interpret = gr.Button(\"Interpret\")\n        with gr.Column():\n            label = gr.Label(label=\"Predicted Sentiment\")\n        with gr.Column():\n            with gr.Tabs():\n                with gr.TabItem(\"Display interpretation with built-in component\"):\n                    interpretation = gr.components.Interpretation(input_text)\n                with gr.TabItem(\"Display interpretation with plot\"):\n                    interpretation_plot = gr.Plot()\n\n    classify.click(classifier, input_text, label)\n    interpret.click(interpretation_function, input_text, [interpretation, interpretation_plot])\n\ndemo.launch()\n
\n\n

You can see the demo below!

\n\n

\n\n

Beyond Sentiment Classification

\n\n

Although we have focused on sentiment classification so far, you can add interpretations to almost any machine learning model.\nThe output must be a gr.Image or gr.Label, but the input can be almost anything (gr.Number, gr.Slider, gr.Radio, gr.Image).

\n\n

Here is a demo of interpretations for an image classification model, built with Blocks:

\n\n

\n\n

Closing remarks

\n\n

We did a deep dive \ud83e\udd3f into how interpretations work and how you can add them to your Blocks app.

\n\n

We also showed how the Blocks API gives you the power to control how the interpretation is visualized in your app.

\n\n

Adding interpretations is a helpful way to make your users understand and gain trust in your model.\nNow you have all the tools you need to add them to all of your apps!

\n", - "tags": ["INTERPRETATION", "SENTIMENT ANALYSIS"], - "spaces": [], - "url": "/guides/custom-interpretations-with-blocks/", - "contributor": null - } - ], - "preprocessing": "this component does *not* accept input.", - "postprocessing": "expects a dict with keys \"original\" and \"interpretation\".", - "parent": "gradio", - "prev_obj": "Image", - "next_obj": "JSON" - }, - "json": { - "class": null, - "name": "JSON", - "description": "Used to display arbitrary JSON output prettily.
", - "tags": { - "preprocessing": "this component does *not* accept input.", - "postprocessing": "expects a {str} filepath to a file containing valid JSON -- or a {list} or {dict} that is valid JSON", - "demos": "zip_to_json, blocks_xray" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "value", - "annotation": "str | dict | list | Callable | None", - "doc": "Default value. If callable, the function will be called whenever the app loads to set the initial value of the component.", - "default": "None" - }, - { - "name": "label", - "annotation": "str | None", - "doc": "component name in interface.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", - "default": "None" - }, - { - "name": "show_label", - "annotation": "bool | None", - "doc": "if True, will display label.", - "default": "None" - }, - { - "name": "container", - "annotation": "bool", - "doc": "If True, will place the component in a container - providing some extra padding around the border.", - "default": "True" - }, - { - "name": "scale", - "annotation": "int | None", - "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", - "default": "None" - }, - { - "name": "min_width", - "annotation": "int", - "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", - "default": "160" - }, - { - "name": "visible", - "annotation": "bool", - "doc": "If False, component will be hidden.", - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "elem_classes", - "annotation": "list[str] | str | None", - "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [ - { - "fn": null, - "name": "change", - "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. 
If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.JSON" - } - ], - "string_shortcuts": [["JSON", "json", "Uses default values"]], - "demos": [ - [ - "zip_to_json", - "from zipfile import ZipFile\n\nimport gradio as gr\n\n\ndef zip_to_json(file_obj):\n files = []\n with ZipFile(file_obj.name) as zfile:\n for zinfo in zfile.infolist():\n files.append(\n {\n \"name\": zinfo.filename,\n \"file_size\": zinfo.file_size,\n \"compressed_size\": zinfo.compress_size,\n }\n )\n return files\n\n\ndemo = gr.Interface(zip_to_json, \"file\", \"json\")\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ], - [ - "blocks_xray", - "import gradio as gr\nimport time\n\ndisease_values = [0.25, 0.5, 0.75]\n\ndef xray_model(diseases, img):\n return [{disease: disease_values[idx] for idx,disease in enumerate(diseases)}]\n\n\ndef ct_model(diseases, img):\n return [{disease: 0.1 for disease in diseases}]\n\nwith gr.Blocks() as demo:\n gr.Markdown(\n \"\"\"\n# Detect Disease From Scan\nWith this model you can lorem ipsum\n- ipsum 1\n- ipsum 2\n\"\"\"\n )\n gr.DuplicateButton()\n disease = gr.CheckboxGroup(\n info=\"Select the diseases you want to scan for.\",\n choices=[\"Covid\", \"Malaria\", \"Lung Cancer\"], label=\"Disease to Scan For\"\n )\n slider = gr.Slider(0, 100)\n\n with gr.Tab(\"X-ray\") as x_tab:\n with gr.Row():\n xray_scan = gr.Image()\n xray_results = gr.JSON()\n xray_run = gr.Button(\"Run\")\n xray_run.click(\n xray_model,\n inputs=[disease, xray_scan],\n outputs=xray_results,\n api_name=\"xray_model\"\n )\n\n with gr.Tab(\"CT Scan\"):\n with gr.Row():\n ct_scan = gr.Image()\n ct_results = gr.JSON()\n ct_run = gr.Button(\"Run\")\n ct_run.click(\n ct_model,\n inputs=[disease, ct_scan],\n outputs=ct_results,\n api_name=\"ct_model\"\n )\n\n upload_btn = gr.Button(\"Upload Results\", variant=\"primary\")\n upload_btn.click(\n lambda ct, xr: None,\n inputs=[ct_results, xray_results],\n outputs=[],\n )\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ] - ], - "preprocessing": "this component does *not* accept input.", - "postprocessing": "expects a str filepath to a file containing valid JSON -- or a list or dict that is valid JSON", - "parent": "gradio", - "prev_obj": "Interpretation", - "next_obj": "Label" - }, - "label": { - "class": null, - "name": "Label", - "description": "Displays a classification label, along with confidence scores of top categories, if provided.
", - "tags": { - "preprocessing": "this component does *not* accept input.", - "postprocessing": "expects a {Dict[str, float]} of classes and confidences, or {str} with just the class or an {int}/{float} for regression outputs, or a {str} path to a .json file containing a json dictionary in the structure produced by Label.postprocess().", - "demos": "main_note, titanic_survival", - "guides": "image-classification-in-pytorch, image-classification-in-tensorflow, image-classification-with-vision-transformers, building-a-pictionary-app" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "value", - "annotation": "dict[str, float] | str | float | Callable | None", - "doc": "Default value to show in the component. If a str or number is provided, simply displays the string or number. If a {Dict[str, float]} of classes and confidences is provided, displays the top class on top and the `num_top_classes` below, along with their confidence bars. If callable, the function will be called whenever the app loads to set the initial value of the component.", - "default": "None" - }, - { - "name": "num_top_classes", - "annotation": "int | None", - "doc": "number of most confident classes to show.", - "default": "None" - }, - { - "name": "label", - "annotation": "str | None", - "doc": "component name in interface.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", - "default": "None" - }, - { - "name": "show_label", - "annotation": "bool | None", - "doc": "if True, will display label.", - "default": "None" - }, - { - "name": "container", - "annotation": "bool", - "doc": "If True, will place the component in a container - providing some extra padding around the border.", - "default": "True" - }, - { - "name": "scale", - "annotation": "int | None", - "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", - "default": "None" - }, - { - "name": "min_width", - "annotation": "int", - "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", - "default": "160" - }, - { - "name": "visible", - "annotation": "bool", - "doc": "If False, component will be hidden.", - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "elem_classes", - "annotation": "list[str] | str | None", - "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. 
Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "color", - "annotation": "str | None", - "doc": "The background color of the label (either a valid css color name or hexadecimal string).", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [ - { - "fn": null, - "name": "change", - "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Label" - }, - { - "fn": null, - "name": "select", - "description": "Event listener for when the user selects a category from Label. Uses event data gradio.SelectData to carry `value` referring to name of selected category, and `index` to refer to index. See EventData documentation on how to use this event data.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Label" - } - ], - "string_shortcuts": [["Label", "label", "Uses default values"]], - "demos": [ - [ - "main_note", - "from math import log2, pow\nimport os\n\nimport numpy as np\nfrom scipy.fftpack import fft\n\nimport gradio as gr\n\nA4 = 440\nC0 = A4 * pow(2, -4.75)\nname = [\"C\", \"C#\", \"D\", \"D#\", \"E\", \"F\", \"F#\", \"G\", \"G#\", \"A\", \"A#\", \"B\"]\n\n\ndef get_pitch(freq):\n h = round(12 * log2(freq / C0))\n n = h % 12\n return name[n]\n\n\ndef main_note(audio):\n rate, y = audio\n if len(y.shape) == 2:\n y = y.T[0]\n N = len(y)\n T = 1.0 / rate\n yf = fft(y)\n yf2 = 2.0 / N * np.abs(yf[0 : N // 2])\n xf = np.linspace(0.0, 1.0 / (2.0 * T), N // 2)\n\n volume_per_pitch = {}\n total_volume = np.sum(yf2)\n for freq, volume in zip(xf, yf2):\n if freq == 0:\n continue\n pitch = get_pitch(freq)\n if pitch not in volume_per_pitch:\n volume_per_pitch[pitch] = 0\n volume_per_pitch[pitch] += 1.0 * volume / total_volume\n volume_per_pitch = {k: float(v) for k, v in volume_per_pitch.items()}\n return volume_per_pitch\n\n\ndemo = gr.Interface(\n main_note,\n gr.Audio(source=\"microphone\"),\n gr.Label(num_top_classes=4),\n examples=[\n [os.path.join(os.path.dirname(__file__),\"audio/recording1.wav\")],\n [os.path.join(os.path.dirname(__file__),\"audio/cantina.wav\")],\n ],\n interpretation=\"default\",\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ], - [ - "titanic_survival", - "import os\n\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\n\nimport gradio as gr\n\ncurrent_dir = os.path.dirname(os.path.realpath(__file__))\ndata = pd.read_csv(os.path.join(current_dir, \"files/titanic.csv\"))\n\n\ndef encode_age(df):\n df.Age = df.Age.fillna(-0.5)\n bins = (-1, 0, 5, 12, 18, 25, 35, 60, 120)\n categories = pd.cut(df.Age, bins, labels=False)\n df.Age = categories\n return df\n\n\ndef encode_fare(df):\n df.Fare = df.Fare.fillna(-0.5)\n bins = (-1, 0, 8, 15, 31, 1000)\n categories = pd.cut(df.Fare, bins, labels=False)\n df.Fare = categories\n return df\n\n\ndef encode_df(df):\n df = encode_age(df)\n df = encode_fare(df)\n sex_mapping = {\"male\": 0, \"female\": 1}\n df = df.replace({\"Sex\": sex_mapping})\n embark_mapping = {\"S\": 1, \"C\": 2, \"Q\": 3}\n df = df.replace({\"Embarked\": embark_mapping})\n df.Embarked = df.Embarked.fillna(0)\n df[\"Company\"] = 0\n df.loc[(df[\"SibSp\"] > 0), \"Company\"] = 1\n df.loc[(df[\"Parch\"] > 0), \"Company\"] = 2\n df.loc[(df[\"SibSp\"] > 0) & (df[\"Parch\"] > 0), \"Company\"] = 3\n df = df[\n [\n \"PassengerId\",\n \"Pclass\",\n \"Sex\",\n \"Age\",\n \"Fare\",\n \"Embarked\",\n \"Company\",\n \"Survived\",\n ]\n ]\n return df\n\n\ntrain = encode_df(data)\n\nX_all = train.drop([\"Survived\", \"PassengerId\"], axis=1)\ny_all = train[\"Survived\"]\n\nnum_test = 0.20\nX_train, X_test, y_train, y_test = train_test_split(\n X_all, y_all, test_size=num_test, random_state=23\n)\n\nclf = RandomForestClassifier()\nclf.fit(X_train, y_train)\npredictions = clf.predict(X_test)\n\n\ndef predict_survival(passenger_class, is_male, age, company, fare, embark_point):\n if passenger_class is None or embark_point is None:\n return None\n df = pd.DataFrame.from_dict(\n {\n \"Pclass\": [passenger_class + 1],\n \"Sex\": [0 if is_male else 1],\n \"Age\": [age],\n \"Fare\": [fare],\n \"Embarked\": [embark_point + 1],\n \"Company\": [\n (1 if 
\"Sibling\" in company else 0) + (2 if \"Child\" in company else 0)\n ]\n }\n )\n df = encode_age(df)\n df = encode_fare(df)\n pred = clf.predict_proba(df)[0]\n return {\"Perishes\": float(pred[0]), \"Survives\": float(pred[1])}\n\n\ndemo = gr.Interface(\n predict_survival,\n [\n gr.Dropdown([\"first\", \"second\", \"third\"], type=\"index\"),\n \"checkbox\",\n gr.Slider(0, 80, value=25),\n gr.CheckboxGroup([\"Sibling\", \"Child\"], label=\"Travelling with (select all)\"),\n gr.Number(value=20),\n gr.Radio([\"S\", \"C\", \"Q\"], type=\"index\"),\n ],\n \"label\",\n examples=[\n [\"first\", True, 30, [], 50, \"S\"],\n [\"second\", False, 40, [\"Sibling\", \"Child\"], 10, \"Q\"],\n [\"third\", True, 30, [\"Child\"], 20, \"S\"],\n ],\n interpretation=\"default\",\n live=True,\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ] - ], - "guides": [ - { - "name": "image-classification-in-pytorch", - "category": "integrating-other-frameworks", - "pretty_category": "Integrating Other Frameworks", - "guide_index": null, - "absolute_index": 20, - "pretty_name": "Image Classification In Pytorch", - "content": "# Image Classification in PyTorch\n\n\n\n\n## Introduction\n\nImage classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from autonomous vehicles to medical imaging. \n\nSuch models are perfect to use with Gradio's *image* input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in Python, and it will look like this (try one of the examples!):\n\n\n\n\nLet's get started!\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). We will be using a pretrained image classification model, so you should also have `torch` installed.\n\n## Step 1 \u2014 Setting up the Image Classification Model\n\nFirst, we will need an image classification model. For this tutorial, we will use a pretrained Resnet-18 model, as it is easily downloadable from [PyTorch Hub](https://pytorch.org/hub/pytorch_vision_resnet/). You can use a different pretrained model or train your own. \n\n```python\nimport torch\n\nmodel = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()\n```\n\nBecause we will be using the model for inference, we have called the `.eval()` method.\n\n## Step 2 \u2014 Defining a `predict` function\n\nNext, we will need to define a function that takes in the *user input*, which in this case is an image, and returns the prediction. The prediction should be returned as a dictionary whose keys are class name and values are confidence probabilities. We will load the class names from this [text file](https://git.io/JJkYN).\n\nIn the case of our pretrained model, it will look like this:\n\n```python\nimport requests\nfrom PIL import Image\nfrom torchvision import transforms\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\ndef predict(inp):\n inp = transforms.ToTensor()(inp).unsqueeze(0)\n with torch.no_grad():\n prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)\n confidences = {labels[i]: float(prediction[i]) for i in range(1000)} \n return confidences\n```\n\nLet's break this down. 
The function takes one parameter:\n\n* `inp`: the input image as a `PIL` image\n\nThen, the function converts the image to a PIL Image and then eventually a PyTorch `tensor`, passes it through the model, and returns:\n\n* `confidences`: the predictions, as a dictionary whose keys are class labels and whose values are confidence probabilities\n\n## Step 3 \u2014 Creating a Gradio Interface\n\nNow that we have our predictive function set up, we can create a Gradio Interface around it. \n\nIn this case, the input component is a drag-and-drop image component. To create this input, we use `Image(type=\"pil\")` which creates the component and handles the preprocessing to convert that to a `PIL` image. \n\nThe output component will be a `Label`, which displays the top labels in a nice form. Since we don't want to show all 1,000 class labels, we will customize it to show only the top 3 images by constructing it as `Label(num_top_classes=3)`.\n\nFinally, we'll add one more parameter, the `examples`, which allows us to prepopulate our interfaces with a few predefined examples. The code for Gradio looks like this:\n\n```python\nimport gradio as gr\n\ngr.Interface(fn=predict, \n inputs=gr.Image(type=\"pil\"),\n outputs=gr.Label(num_top_classes=3),\n examples=[\"lion.jpg\", \"cheetah.jpg\"]).launch()\n```\n\nThis produces the following interface, which you can try right here in your browser (try uploading your own examples!):\n\n\n\n----------\n\nAnd you're done! That's all the code you need to build a web demo for an image classifier. If you'd like to share with others, try setting `share=True` when you `launch()` the Interface!\n\n", - "html": "

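The `Label` docs above state that postprocessing expects a `Dict[str, float]` of classes and confidences, or a plain `str`/number. As an illustrative sketch only (the `classify` function and its hard-coded scores are invented here, assuming the Gradio 3.x API documented above), a function can simply return such a dict to a `gr.Label` output:

```python
import gradio as gr

# Toy classifier: the function name and the hard-coded scores are placeholders,
# not part of the original docs. Returning a {class: confidence} dict lets
# gr.Label show the top class with confidence bars; returning a plain string
# (e.g. "cat") would also be accepted per the postprocessing notes above.
def classify(description):
    return {"cat": 0.62, "dog": 0.31, "bird": 0.07}

demo = gr.Interface(
    fn=classify,
    inputs=gr.Textbox(label="Describe the animal"),
    outputs=gr.Label(num_top_classes=2),
)

if __name__ == "__main__":
    demo.launch()
```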
\n", - "tags": ["VISION", "RESNET", "PYTORCH"], - "spaces": [ - "https://huggingface.co/spaces/abidlabs/pytorch-image-classifier", - "https://huggingface.co/spaces/pytorch/ResNet", - "https://huggingface.co/spaces/pytorch/ResNext", - "https://huggingface.co/spaces/pytorch/SqueezeNet" - ], - "url": "/guides/image-classification-in-pytorch/", - "contributor": null - }, - { - "name": "image-classification-in-tensorflow", - "category": "integrating-other-frameworks", - "pretty_category": "Integrating Other Frameworks", - "guide_index": null, - "absolute_index": 21, - "pretty_name": "Image Classification In Tensorflow", - "content": "# Image Classification in TensorFlow and Keras\n\n\n\n\n## Introduction\n\nImage classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from traffic control systems to satellite imaging. \n\nSuch models are perfect to use with Gradio's *image* input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in Python, and it will look like this (try one of the examples!):\n\n\n\n\nLet's get started!\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). We will be using a pretrained Keras image classification model, so you should also have `tensorflow` installed.\n\n## Step 1 \u2014 Setting up the Image Classification Model\n\nFirst, we will need an image classification model. For this tutorial, we will use a pretrained Mobile Net model, as it is easily downloadable from [Keras](https://keras.io/api/applications/mobilenet/). You can use a different pretrained model or train your own. \n\n```python\nimport tensorflow as tf\n\ninception_net = tf.keras.applications.MobileNetV2()\n```\n\nThis line automatically downloads the MobileNet model and weights using the Keras library. \n\n## Step 2 \u2014 Defining a `predict` function\n\nNext, we will need to define a function that takes in the *user input*, which in this case is an image, and returns the prediction. The prediction should be returned as a dictionary whose keys are class name and values are confidence probabilities. We will load the class names from this [text file](https://git.io/JJkYN).\n\nIn the case of our pretrained model, it will look like this:\n\n```python\nimport requests\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\ndef classify_image(inp):\n inp = inp.reshape((-1, 224, 224, 3))\n inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)\n prediction = inception_net.predict(inp).flatten()\n confidences = {labels[i]: float(prediction[i]) for i in range(1000)}\n return confidences\n```\n\nLet's break this down. The function takes one parameter:\n\n* `inp`: the input image as a `numpy` array\n\nThen, the function adds a batch dimension, passes it through the model, and returns:\n\n* `confidences`: the predictions, as a dictionary whose keys are class labels and whose values are confidence probabilities\n\n## Step 3 \u2014 Creating a Gradio Interface\n\nNow that we have our predictive function set up, we can create a Gradio Interface around it. \n\nIn this case, the input component is a drag-and-drop image component. 
To create this input, we can use the `\"gradio.inputs.Image\"` class, which creates the component and handles the preprocessing to convert that to a numpy array. We will instantiate the class with a parameter that automatically preprocesses the input image to be 224 pixels by 224 pixels, which is the size that MobileNet expects.\n\nThe output component will be a `\"label\"`, which displays the top labels in a nice form. Since we don't want to show all 1,000 class labels, we will customize it to show only the top 3 images.\n\nFinally, we'll add one more parameter, the `examples`, which allows us to prepopulate our interfaces with a few predefined examples. The code for Gradio looks like this:\n\n```python\nimport gradio as gr\n\ngr.Interface(fn=classify_image, \n inputs=gr.Image(shape=(224, 224)),\n outputs=gr.Label(num_top_classes=3),\n examples=[\"banana.jpg\", \"car.jpg\"]).launch()\n```\n\nThis produces the following interface, which you can try right here in your browser (try uploading your own examples!):\n\n\n\n----------\n\nAnd you're done! That's all the code you need to build a web demo for an image classifier. If you'd like to share with others, try setting `share=True` when you `launch()` the Interface!\n\n", - "html": "

\n", - "tags": ["VISION", "MOBILENET", "TENSORFLOW"], - "spaces": [ - "https://huggingface.co/spaces/abidlabs/keras-image-classifier" - ], - "url": "/guides/image-classification-in-tensorflow/", - "contributor": null - }, - { - "name": "image-classification-with-vision-transformers", - "category": "integrating-other-frameworks", - "pretty_category": "Integrating Other Frameworks", - "guide_index": null, - "absolute_index": 22, - "pretty_name": "Image Classification With Vision Transformers", - "content": "# Image Classification with Vision Transformers\n\n\n\n\n## Introduction\n\nImage classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from facial recognition to manufacturing quality control. \n\nState-of-the-art image classifiers are based on the *transformers* architectures, originally popularized for NLP tasks. Such architectures are typically called vision transformers (ViT). Such models are perfect to use with Gradio's *image* input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in a **single line of Python**, and it will look like this (try one of the examples!):\n\n\n\n\nLet's get started!\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started).\n\n## Step 1 \u2014 Choosing a Vision Image Classification Model\n\nFirst, we will need an image classification model. For this tutorial, we will use a model from the [Hugging Face Model Hub](https://huggingface.co/models?pipeline_tag=image-classification). The Hub contains thousands of models covering dozens of different machine learning tasks. \n\nExpand the Tasks category on the left sidebar and select \"Image Classification\" as our task of interest. You will then see all of the models on the Hub that are designed to classify images.\n\nAt the time of writing, the most popular one is `google/vit-base-patch16-224`, which has been trained on ImageNet images at a resolution of 224x224 pixels. We will use this model for our demo. \n\n## Step 2 \u2014 Loading the Vision Transformer Model with Gradio\n\nWhen using a model from the Hugging Face Hub, we do not need to define the input or output components for the demo. Similarly, we do not need to be concerned with the details of preprocessing or postprocessing. \nAll of these are automatically inferred from the model tags.\n\nBesides the import statement, it only takes a single line of Python to load and launch the demo. \n\nWe use the `gr.Interface.load()` method and pass in the path to the model including the `huggingface/` to designate that it is from the Hugging Face Hub.\n\n```python\nimport gradio as gr\n\ngr.Interface.load(\n \"huggingface/google/vit-base-patch16-224\",\n examples=[\"alligator.jpg\", \"laptop.jpg\"]).launch()\n```\n\nNotice that we have added one more parameter, the `examples`, which allows us to prepopulate our interfaces with a few predefined examples. \n\nThis produces the following interface, which you can try right here in your browser. When you input an image, it is automatically preprocessed and sent to the Hugging Face Hub API, where it is passed through the model and returned as a human-interpretable prediction. Try uploading your own image!\n\n\n\n----------\n\nAnd you're done! In one line of code, you have built a web demo for an image classifier. 
If you'd like to share with others, try setting `share=True` when you `launch()` the Interface!\n\n", - "html": "

\n", - "tags": ["VISION", "TRANSFORMERS", "HUB"], - "spaces": [ - "https://huggingface.co/spaces/abidlabs/vision-transformer" - ], - "url": "/guides/image-classification-with-vision-transformers/", - "contributor": null - }, - { - "name": "building-a-pictionary-app", - "category": "other-tutorials", - "pretty_category": "Other Tutorials", - "guide_index": null, - "absolute_index": 33, - "pretty_name": "Building A Pictionary App", - "content": "# Building a Pictionary App\n\n\n\n\n## Introduction\n\nHow well can an algorithm guess what you're drawing? A few years ago, Google released the **Quick Draw** dataset, which contains drawings made by humans of a variety of every objects. Researchers have used this dataset to train models to guess Pictionary-style drawings. \n\nSuch models are perfect to use with Gradio's *sketchpad* input, so in this tutorial we will build a Pictionary web application using Gradio. We will be able to build the whole web application in Python, and will look like this (try drawing something!):\n\n\n\nLet's get started! This guide covers how to build a pictionary app (step-by-step): \n\n1. [Set up the Sketch Recognition Model](#1-set-up-the-sketch-recognition-model)\n2. [Define a `predict` function](#2-define-a-predict-function)\n3. [Create a Gradio Interface](#3-create-a-gradio-interface)\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). To use the pretrained sketchpad model, also install `torch`.\n\n## 1. Set up the Sketch Recognition Model\n\nFirst, you will need a sketch recognition model. Since many researchers have already trained their own models on the Quick Draw dataset, we will use a pretrained model in this tutorial. Our model is a light 1.5 MB model trained by Nate Raw, that [you can download here](https://huggingface.co/spaces/nateraw/quickdraw/blob/main/pytorch_model.bin). \n\nIf you are interested, here [is the code](https://github.com/nateraw/quickdraw-pytorch) that was used to train the model. We will simply load the pretrained model in PyTorch, as follows:\n\n```python\nimport torch\nfrom torch import nn\n\nmodel = nn.Sequential(\n nn.Conv2d(1, 32, 3, padding='same'),\n nn.ReLU(),\n nn.MaxPool2d(2),\n nn.Conv2d(32, 64, 3, padding='same'),\n nn.ReLU(),\n nn.MaxPool2d(2),\n nn.Conv2d(64, 128, 3, padding='same'),\n nn.ReLU(),\n nn.MaxPool2d(2),\n nn.Flatten(),\n nn.Linear(1152, 256),\n nn.ReLU(),\n nn.Linear(256, len(LABELS)),\n)\nstate_dict = torch.load('pytorch_model.bin', map_location='cpu')\nmodel.load_state_dict(state_dict, strict=False)\nmodel.eval()\n```\n\n## 2. Define a `predict` function\n\nNext, you will need to define a function that takes in the *user input*, which in this case is a sketched image, and returns the prediction. The prediction should be returned as a dictionary whose keys are class name and values are confidence probabilities. 
We will load the class names from this [text file](https://huggingface.co/spaces/nateraw/quickdraw/blob/main/class_names.txt).\n\nIn the case of our pretrained model, it will look like this:\n\n```python\nfrom pathlib import Path\n\nLABELS = Path('class_names.txt').read_text().splitlines()\n\ndef predict(img):\n x = torch.tensor(img, dtype=torch.float32).unsqueeze(0).unsqueeze(0) / 255.\n with torch.no_grad():\n out = model(x)\n probabilities = torch.nn.functional.softmax(out[0], dim=0)\n values, indices = torch.topk(probabilities, 5)\n confidences = {LABELS[i]: v.item() for i, v in zip(indices, values)}\n return confidences\n```\n\nLet's break this down. The function takes one parameters:\n\n* `img`: the input image as a `numpy` array\n\nThen, the function converts the image to a PyTorch `tensor`, passes it through the model, and returns:\n\n* `confidences`: the top five predictions, as a dictionary whose keys are class labels and whose values are confidence probabilities\n\n## 3. Create a Gradio Interface\n\nNow that we have our predictive function set up, we can create a Gradio Interface around it. \n\nIn this case, the input component is a sketchpad. To create a sketchpad input, we can use the convenient string shortcut, `\"sketchpad\"` which creates a canvas for a user to draw on and handles the preprocessing to convert that to a numpy array. \n\nThe output component will be a `\"label\"`, which displays the top labels in a nice form.\n\nFinally, we'll add one more parameter, setting `live=True`, which allows our interface to run in real time, adjusting its predictions every time a user draws on the sketchpad. The code for Gradio looks like this:\n\n```python\nimport gradio as gr\n\ngr.Interface(fn=predict, \n inputs=\"sketchpad\",\n outputs=\"label\",\n live=True).launch()\n```\n\nThis produces the following interface, which you can try right here in your browser (try drawing something, like a \"snake\" or a \"laptop\"):\n\n\n\n----------\n\nAnd you're done! That's all the code you need to build a Pictionary-style guessing app. Have fun and try to find some edge cases \ud83e\uddd0\n\n", - "html": "

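The `select` listener documented for `gradio.Label` above carries `gr.SelectData` event data, with `value` holding the selected category name and `index` its position. A minimal Blocks sketch of wiring that listener, assuming the Gradio 3.x API described above (the `classify` and `on_select` functions and their toy scores are invented for illustration):

```python
import gradio as gr

# Toy scores; a real app would call a model here.
def classify(text):
    return {"positive": 0.7, "neutral": 0.2, "negative": 0.1}

# The SelectData event data carries the selected category name (.value) and its index (.index).
def on_select(evt: gr.SelectData):
    return f"You clicked '{evt.value}' (index {evt.index})"

with gr.Blocks() as demo:
    text = gr.Textbox(label="Input")
    label = gr.Label(num_top_classes=3)
    chosen = gr.Textbox(label="Selected category")
    text.change(classify, inputs=text, outputs=label)
    label.select(on_select, None, chosen)

if __name__ == "__main__":
    demo.launch()
```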
\n", - "tags": ["SKETCHPAD", "LABELS", "LIVE"], - "spaces": ["https://huggingface.co/spaces/nateraw/quickdraw"], - "url": "/guides/building-a-pictionary-app/", - "contributor": null - } - ], - "preprocessing": "this component does *not* accept input.", - "postprocessing": "expects a Dict[str, float] of classes and confidences, or str with just the class or an int/float for regression outputs, or a str path to a .json file containing a json dictionary in the structure produced by Label.postprocess().", - "parent": "gradio", - "prev_obj": "JSON", - "next_obj": "LinePlot" - }, - "lineplot": { - "class": null, - "name": "LinePlot", - "description": "Create a line plot.

", - "tags": { - "preprocessing": "this component does *not* accept input.", - "postprocessing": "expects a pandas dataframe with the data to plot.", - "demos": "line_plot, live_dashboard" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "value", - "annotation": "pd.DataFrame | Callable | None", - "doc": "The pandas dataframe containing the data to display in a scatter plot.", - "default": "None" - }, - { - "name": "x", - "annotation": "str | None", - "doc": "Column corresponding to the x axis.", - "default": "None" - }, - { - "name": "y", - "annotation": "str | None", - "doc": "Column corresponding to the y axis.", - "default": "None" - }, - { - "name": "color", - "annotation": "str | None", - "doc": "The column to determine the point color. If the column contains numeric data, gradio will interpolate the column data so that small values correspond to light colors and large values correspond to dark values.", - "default": "None" - }, - { - "name": "stroke_dash", - "annotation": "str | None", - "doc": "The column to determine the symbol used to draw the line, e.g. dashed lines, dashed lines with points.", - "default": "None" - }, - { - "name": "overlay_point", - "annotation": "bool | None", - "doc": "Whether to draw a point on the line for each (x, y) coordinate pair.", - "default": "None" - }, - { - "name": "title", - "annotation": "str | None", - "doc": "The title to display on top of the chart.", - "default": "None" - }, - { - "name": "tooltip", - "annotation": "list[str] | str | None", - "doc": "The column (or list of columns) to display on the tooltip when a user hovers a point on the plot.", - "default": "None" - }, - { - "name": "x_title", - "annotation": "str | None", - "doc": "The title given to the x axis. By default, uses the value of the x parameter.", - "default": "None" - }, - { - "name": "y_title", - "annotation": "str | None", - "doc": "The title given to the y axis. By default, uses the value of the y parameter.", - "default": "None" - }, - { - "name": "color_legend_title", - "annotation": "str | None", - "doc": "The title given to the color legend. By default, uses the value of color parameter.", - "default": "None" - }, - { - "name": "stroke_dash_legend_title", - "annotation": "str | None", - "doc": "The title given to the stroke_dash legend. By default, uses the value of the stroke_dash parameter.", - "default": "None" - }, - { - "name": "color_legend_position", - "annotation": "Literal['left', 'right', 'top', 'bottom', 'top-left', 'top-right', 'bottom-left', 'bottom-right', 'none'] | None", - "doc": "The position of the color legend. If the string value 'none' is passed, this legend is omitted. For other valid position values see: https://vega.github.io/vega/docs/legends/#orientation.", - "default": "None" - }, - { - "name": "stroke_dash_legend_position", - "annotation": "Literal['left', 'right', 'top', 'bottom', 'top-left', 'top-right', 'bottom-left', 'bottom-right', 'none'] | None", - "doc": "The position of the stoke_dash legend. If the string value 'none' is passed, this legend is omitted. 
For other valid position values see: https://vega.github.io/vega/docs/legends/#orientation.", - "default": "None" - }, - { - "name": "height", - "annotation": "int | None", - "doc": "The height of the plot in pixels.", - "default": "None" - }, - { - "name": "width", - "annotation": "int | None", - "doc": "The width of the plot in pixels.", - "default": "None" - }, - { - "name": "x_lim", - "annotation": "list[int] | None", - "doc": "A tuple or list containing the limits for the x-axis, specified as [x_min, x_max].", - "default": "None" - }, - { - "name": "y_lim", - "annotation": "list[int] | None", - "doc": "A tuple of list containing the limits for the y-axis, specified as [y_min, y_max].", - "default": "None" - }, - { - "name": "caption", - "annotation": "str | None", - "doc": "The (optional) caption to display below the plot.", - "default": "None" - }, - { - "name": "interactive", - "annotation": "bool | None", - "doc": "Whether users should be able to interact with the plot by panning or zooming with their mouse or trackpad.", - "default": "True" - }, - { - "name": "label", - "annotation": "str | None", - "doc": "The (optional) label to display on the top left corner of the plot.", - "default": "None" - }, - { - "name": "show_label", - "annotation": "bool | None", - "doc": "Whether the label should be displayed.", - "default": "None" - }, - { - "name": "container", - "annotation": "bool", - "doc": null, - "default": "True" - }, - { - "name": "scale", - "annotation": "int | None", - "doc": null, - "default": "None" - }, - { - "name": "min_width", - "annotation": "int", - "doc": null, - "default": "160" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", - "default": "None" - }, - { - "name": "visible", - "annotation": "bool", - "doc": "Whether the plot should be visible.", - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "elem_classes", - "annotation": "list[str] | str | None", - "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [ - { - "fn": null, - "name": "change", - "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." 
- }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.LinePlot" - }, - { - "fn": null, - "name": "clear", - "description": "This listener is triggered when the user clears the component (e.g. image or audio) using the X button for the component. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.LinePlot" - } - ], - "string_shortcuts": [["LinePlot", "lineplot", "Uses default values"]], - "demos": [ - [ - "line_plot", - "import gradio as gr\nfrom vega_datasets import data\n\nstocks = data.stocks()\ngapminder = data.gapminder()\ngapminder = gapminder.loc[\n gapminder.country.isin([\"Argentina\", \"Australia\", \"Afghanistan\"])\n]\nclimate = data.climate()\nseattle_weather = data.seattle_weather()\n\n## Or generate your own fake data, here's an example for stocks:\n#\n# import pandas as pd\n# import random\n#\n# stocks = pd.DataFrame(\n# {\n# \"symbol\": [\n# random.choice(\n# [\n# \"MSFT\",\n# \"AAPL\",\n# \"AMZN\",\n# \"IBM\",\n# \"GOOG\",\n# ]\n# )\n# for _ in range(120)\n# ],\n# \"date\": [\n# pd.Timestamp(year=2000 + i, month=j, day=1)\n# for i in range(10)\n# for j in range(1, 13)\n# ],\n# \"price\": [random.randint(10, 200) for _ in range(120)],\n# }\n# )\n\n\ndef line_plot_fn(dataset):\n if dataset == \"stocks\":\n return gr.LinePlot.update(\n stocks,\n x=\"date\",\n y=\"price\",\n color=\"symbol\",\n color_legend_position=\"bottom\",\n title=\"Stock Prices\",\n tooltip=[\"date\", \"price\", \"symbol\"],\n height=300,\n width=500,\n )\n elif dataset == \"climate\":\n return gr.LinePlot.update(\n climate,\n x=\"DATE\",\n y=\"HLY-TEMP-NORMAL\",\n y_lim=[250, 500],\n title=\"Climate\",\n tooltip=[\"DATE\", \"HLY-TEMP-NORMAL\"],\n height=300,\n width=500,\n )\n elif dataset == \"seattle_weather\":\n return gr.LinePlot.update(\n seattle_weather,\n x=\"date\",\n y=\"temp_min\",\n tooltip=[\"weather\", \"date\"],\n overlay_point=True,\n title=\"Seattle Weather\",\n height=300,\n width=500,\n )\n elif dataset == \"gapminder\":\n return gr.LinePlot.update(\n gapminder,\n x=\"year\",\n y=\"life_expect\",\n color=\"country\",\n title=\"Life expectancy for countries\",\n stroke_dash=\"cluster\",\n x_lim=[1950, 2010],\n tooltip=[\"country\", \"life_expect\"],\n stroke_dash_legend_title=\"Country Cluster\",\n height=300,\n width=500,\n )\n\n\nwith gr.Blocks() as line_plot:\n with gr.Row():\n with gr.Column():\n dataset = gr.Dropdown(\n choices=[\"stocks\", \"climate\", \"seattle_weather\", \"gapminder\"],\n value=\"stocks\",\n )\n with gr.Column():\n plot = gr.LinePlot()\n dataset.change(line_plot_fn, inputs=dataset, outputs=plot)\n line_plot.load(fn=line_plot_fn, inputs=dataset, outputs=plot)\n\n\nif __name__ == \"__main__\":\n line_plot.launch()\n" - ], - [ - "live_dashboard", - "import math\n\nimport 
pandas as pd\n\nimport gradio as gr\nimport datetime\nimport numpy as np\n\n\ndef get_time():\n return datetime.datetime.now()\n\n\nplot_end = 2 * math.pi\n\n\ndef get_plot(period=1):\n global plot_end\n x = np.arange(plot_end - 2 * math.pi, plot_end, 0.02)\n y = np.sin(2 * math.pi * period * x)\n update = gr.LinePlot.update(\n value=pd.DataFrame({\"x\": x, \"y\": y}),\n x=\"x\",\n y=\"y\",\n title=\"Plot (updates every second)\",\n width=600,\n height=350,\n )\n plot_end += 2 * math.pi\n if plot_end > 1000:\n plot_end = 2 * math.pi\n return update\n\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n c_time2 = gr.Textbox(label=\"Current Time refreshed every second\")\n gr.Textbox(\n \"Change the value of the slider to automatically update the plot\",\n label=\"\",\n )\n period = gr.Slider(\n label=\"Period of plot\", value=1, minimum=0, maximum=10, step=1\n )\n plot = gr.LinePlot(show_label=False)\n with gr.Column():\n name = gr.Textbox(label=\"Enter your name\")\n greeting = gr.Textbox(label=\"Greeting\")\n button = gr.Button(value=\"Greet\")\n button.click(lambda s: f\"Hello {s}\", name, greeting)\n\n demo.load(lambda: datetime.datetime.now(), None, c_time2, every=1)\n dep = demo.load(get_plot, None, plot, every=1)\n period.change(get_plot, period, plot, every=1, cancels=[dep])\n\nif __name__ == \"__main__\":\n demo.queue().launch()\n" - ] - ], - "preprocessing": "this component does *not* accept input.", - "postprocessing": "expects a pandas dataframe with the data to plot.", - "parent": "gradio", - "prev_obj": "Label", - "next_obj": "Markdown" - }, - "markdown": { - "class": null, - "name": "Markdown", - "description": "Used to render arbitrary Markdown output. Can also render latex enclosed by dollar signs.
", - "tags": { - "preprocessing": "this component does *not* accept input.", - "postprocessing": "expects a valid {str} that can be rendered as Markdown.", - "demos": "blocks_hello, blocks_kinematics", - "guides": "key-features" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "value", - "annotation": "str | Callable", - "doc": "Value to show in Markdown component. If callable, the function will be called whenever the app loads to set the initial value of the component.", - "default": "\"\"" - }, - { - "name": "visible", - "annotation": "bool", - "doc": "If False, component will be hidden.", - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "elem_classes", - "annotation": "list[str] | str | None", - "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "rtl", - "annotation": "bool", - "doc": "If True, sets the direction of the rendered text to right-to-left. Default is False, which renders text left-to-right.", - "default": "False" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [ - { - "fn": null, - "name": "change", - "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Markdown" - } - ], - "string_shortcuts": [["Markdown", "markdown", "Uses default values"]], - "demos": [ - [ - "blocks_hello", - "import gradio as gr\n\ndef welcome(name):\n return f\"Welcome to Gradio, {name}!\"\n\nwith gr.Blocks() as demo:\n gr.Markdown(\n \"\"\"\n # Hello World!\n Start typing below to see the output.\n \"\"\")\n inp = gr.Textbox(placeholder=\"What is your name?\")\n out = gr.Textbox()\n inp.change(welcome, inp, out)\n\nif __name__ == \"__main__\":\n demo.launch()" - ], - [ - "blocks_kinematics", - "import pandas as pd\nimport numpy as np\n\nimport gradio as gr\n\n\ndef plot(v, a):\n g = 9.81\n theta = a / 180 * 3.14\n tmax = ((2 * v) * np.sin(theta)) / g\n timemat = tmax * np.linspace(0, 1, 40)\n\n x = (v * timemat) * np.cos(theta)\n y = ((v * timemat) * np.sin(theta)) - ((0.5 * g) * (timemat**2))\n df = pd.DataFrame({\"x\": x, \"y\": y})\n return df\n\n\ndemo = gr.Blocks()\n\nwith demo:\n gr.Markdown(\n r\"Let's do some kinematics! Choose the speed and angle to see the trajectory. Remember that the range $R = v_0^2 \\cdot \\frac{\\sin(2\\theta)}{g}$\"\n )\n\n with gr.Row():\n speed = gr.Slider(1, 30, 25, label=\"Speed\")\n angle = gr.Slider(0, 90, 45, label=\"Angle\")\n output = gr.LinePlot(\n x=\"x\",\n y=\"y\",\n overlay_point=True,\n tooltip=[\"x\", \"y\"],\n x_lim=[0, 100],\n y_lim=[0, 60],\n width=350,\n height=300,\n )\n btn = gr.Button(value=\"Run\")\n btn.click(plot, [speed, angle], output)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ] - ], - "guides": [ - { - "name": "key-features", - "category": "getting-started", - "pretty_category": "Getting Started", - "guide_index": 2, - "absolute_index": 1, - "pretty_name": "Key Features", - "content": "# Key Features\n\nLet's go through some of the most popular features of Gradio! Here are Gradio's key features:\n\n1. [Adding example inputs](#example-inputs)\n2. [Passing custom error messages](#alerts)\n3. [Adding descriptive content](#descriptive-content)\n4. [Setting up flagging](#flagging)\n5. [Preprocessing and postprocessing](#preprocessing-and-postprocessing)\n6. [Styling demos](#styling)\n7. [Queuing users](#queuing)\n8. [Iterative outputs](#iterative-outputs)\n9. [Progress bars](#progress-bars)\n10. [Batch functions](#batch-functions)\n11. [Running on collaborative notebooks](#colab-notebooks)\n\n## Example Inputs\n\nYou can provide example data that a user can easily load into `Interface`. This can be helpful to demonstrate the types of inputs the model expects, as well as to provide a way to explore your dataset in conjunction with your model. To load example data, you can provide a **nested list** to the `examples=` keyword argument of the Interface constructor. Each sublist within the outer list represents a data sample, and each element within the sublist represents an input for each input component. 
The format of example data for each component is specified in the [Docs](https://gradio.app/docs#components).\n\n```python\nimport gradio as gr\n\ndef calculator(num1, operation, num2):\n if operation == \"add\":\n return num1 + num2\n elif operation == \"subtract\":\n return num1 - num2\n elif operation == \"multiply\":\n return num1 * num2\n elif operation == \"divide\":\n if num2 == 0:\n raise gr.Error(\"Cannot divide by zero!\")\n return num1 / num2\n\ndemo = gr.Interface(\n calculator,\n [\n \"number\", \n gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n \"number\"\n ],\n \"number\",\n examples=[\n [5, \"add\", 3],\n [4, \"divide\", 2],\n [-4, \"multiply\", 2.5],\n [0, \"subtract\", 1.2],\n ],\n title=\"Toy Calculator\",\n description=\"Here's a sample toy calculator. Allows you to calculate things like $2+2=4$\",\n)\ndemo.launch()\n\n```\n\n\nYou can load a large dataset into the examples to browse and interact with the dataset through Gradio. The examples will be automatically paginated (you can configure this through the `examples_per_page` argument of `Interface`).\n\nContinue learning about examples in the [More On Examples](https://gradio.app/more-on-examples) guide.\n\n## Alerts\n\nYou wish to pass custom error messages to the user. To do so, raise a `gr.Error(\"custom message\")` to display an error message. If you try to divide by zero in the calculator demo above, a popup modal will display the custom error message. Learn more about Error in the [docs](https://gradio.app/docs#error). \n\nYou can also issue `gr.Warning(\"message\")` and `gr.Info(\"message\")` by having them as standalone lines in your function, which will immediately display modals while continuing the execution of your function. Queueing needs to be enabled for this to work. \n\nNote below how the `gr.Error` has to be raised, while the `gr.Warning` and `gr.Info` are single lines.\n\n```python\ndef start_process(name):\n gr.Info(\"Starting process\")\n if name is None:\n gr.Warning(\"Name is empty\")\n ...\n if success == False:\n raise gr.Error(\"Process failed\")\n```\n \n## Descriptive Content\n\nIn the previous example, you may have noticed the `title=` and `description=` keyword arguments in the `Interface` constructor that helps users understand your app.\n\nThere are three arguments in the `Interface` constructor to specify where this content should go:\n\n* `title`: which accepts text and can display it at the very top of interface, and also becomes the page title.\n* `description`: which accepts text, markdown or HTML and places it right under the title.\n* `article`: which also accepts text, markdown or HTML and places it below the interface.\n\n![annotated](https://github.com/gradio-app/gradio/blob/main/guides/assets/annotated.png?raw=true)\n\nIf you're using the `Blocks` API instead, you can insert text, markdown, or HTML anywhere using the `gr.Markdown(...)` or `gr.HTML(...)` components, with descriptive content inside the `Component` constructor.\n\nAnother useful keyword argument is `label=`, which is present in every `Component`. This modifies the label text at the top of each `Component`. You can also add the `info=` keyword argument to form elements like `Textbox` or `Radio` to provide further information on their usage.\n\n```python\ngr.Number(label='Age', info='In years, must be greater than 0')\n```\n\n## Flagging\n\nBy default, an `Interface` will have \"Flag\" button. 
When a user testing your `Interface` sees input with interesting output, such as erroneous or unexpected model behaviour, they can flag the input for you to review. Within the directory provided by the `flagging_dir=` argument to the `Interface` constructor, a CSV file will log the flagged inputs. If the interface involves file data, such as for Image and Audio components, folders will be created to store those flagged data as well.\n\nFor example, with the calculator interface shown above, we would have the flagged data stored in the flagged directory shown below:\n\n```directory\n+-- calculator.py\n+-- flagged/\n| +-- logs.csv\n```\n\n*flagged/logs.csv*\n\n```csv\nnum1,operation,num2,Output\n5,add,7,12\n6,subtract,1.5,4.5\n```\n\nWith the sepia interface shown earlier, we would have the flagged data stored in the flagged directory shown below:\n\n```directory\n+-- sepia.py\n+-- flagged/\n| +-- logs.csv\n| +-- im/\n| | +-- 0.png\n| | +-- 1.png\n| +-- Output/\n| | +-- 0.png\n| | +-- 1.png\n```\n\n*flagged/logs.csv*\n\n```csv\nim,Output\nim/0.png,Output/0.png\nim/1.png,Output/1.png\n```\n\nIf you wish for the user to provide a reason for flagging, you can pass a list of strings to the `flagging_options` argument of Interface. Users will have to select one of the strings when flagging, which will be saved as an additional column to the CSV.\n\n## Preprocessing and Postprocessing\n\n![](https://github.com/gradio-app/gradio/blob/main/js/_website/src/assets/img/dataflow.svg?raw=true)\n\nAs you've seen, Gradio includes components that can handle a variety of different data types, such as images, audio, and video. Most components can be used both as inputs or outputs.\n\nWhen a component is used as an input, Gradio automatically handles the *preprocessing* needed to convert the data from a type sent by the user's browser (such as a base64 representation of a webcam snapshot) to a form that can be accepted by your function (such as a `numpy` array).\n\nSimilarly, when a component is used as an output, Gradio automatically handles the *postprocessing* needed to convert the data from what is returned by your function (such as a list of image paths) to a form that can be displayed in the user's browser (such as a `Gallery` of images in base64 format).\n\nYou can control the *preprocessing* using the parameters when constructing the image component. For example, here if you instantiate the `Image` component with the following parameters, it will convert the image to the `PIL` type and reshape it to be `(100, 100)` no matter the original size that it was submitted as:\n\n```py\nimg = gr.Image(shape=(100, 100), type=\"pil\")\n```\n\nIn contrast, here we keep the original size of the image, but invert the colors before converting it to a numpy array:\n\n```py\nimg = gr.Image(invert_colors=True, type=\"numpy\")\n```\n\nPostprocessing is a lot easier! Gradio automatically recognizes the format of the returned data (e.g. is the `Image` a `numpy` array or a `str` filepath?) and postprocesses it into a format that can be displayed by the browser.\n\nTake a look at the [Docs](https://gradio.app/docs) to see all the preprocessing-related parameters for each Component.\n\n## Styling\n\nGradio themes are the easiest way to customize the look and feel of your app. You can choose from a variety of themes, or create your own. To do so, pass the `theme=` kwarg to the `Interface` constructor. 
For example:\n\n```python\ndemo = gr.Interface(..., theme=gr.themes.Monochrome())\n```\n\nGradio comes with a set of prebuilt themes which you can load from `gr.themes.*`. You can extend these themes or create your own themes from scratch - see the [Theming guide](https://gradio.app/guides/theming-guide) for more details.\n\nFor additional styling ability, you can pass any CSS to your app using the `css=` kwarg.\nThe base class for the Gradio app is `gradio-container`, so here's an example that changes the background color of the Gradio app:\n\n```python\nwith gr.Interface(css=\".gradio-container {background-color: red}\") as demo:\n ...\n```\n\nSome components can be additionally styled through the `style()` method. For example:\n\n```python\nimg = gr.Image(\"lion.jpg\").style(height='24', rounded=False)\n```\n\nTake a look at the [Docs](https://gradio.app/docs) to see all the styling options for each Component.\n\n## Queuing\n\nIf your app expects heavy traffic, use the `queue()` method to control processing rate. This will queue up calls so only a certain number of requests are processed at a single time. Queueing uses websockets, which also prevent network timeouts, so you should use queueing if the inference time of your function is long (> 1min).\n\nWith `Interface`:\n\n```python\ndemo = gr.Interface(...).queue()\ndemo.launch()\n```\n\nWith `Blocks`:\n\n```python\nwith gr.Blocks() as demo:\n #...\ndemo.queue()\ndemo.launch()\n```\n\nYou can control the number of requests processed at a single time as such:\n\n```python\ndemo.queue(concurrency_count=3)\n```\n\nSee the [Docs on queueing](/docs/#queue) on configuring other queuing parameters.\n\nTo specify only certain functions for queueing in Blocks:\n\n```python\nwith gr.Blocks() as demo2:\n num1 = gr.Number()\n num2 = gr.Number()\n output = gr.Number()\n gr.Button(\"Add\").click(\n lambda a, b: a + b, [num1, num2], output)\n gr.Button(\"Multiply\").click(\n lambda a, b: a * b, [num1, num2], output, queue=True)\ndemo2.launch()\n```\n\n## Iterative Outputs\n\nIn some cases, you may want to stream a sequence of outputs rather than show a single output at once. For example, you might have an image generation model and you want to show the image that is generated at each step, leading up to the final image. Or you might have a chatbot which streams its response one word at a time instead of returning it all at once.\n\nIn such cases, you can supply a **generator** function into Gradio instead of a regular function. Creating generators in Python is very simple: instead of a single `return` value, a function should `yield` a series of values instead. Usually the `yield` statement is put in some kind of loop. Here's an example of an generator that simply counts up to a given number:\n\n```python\ndef my_generator(x):\n for i in range(x):\n yield i\n```\n\nYou supply a generator into Gradio the same way as you would a regular function. 
For example, here's a a (fake) image generation model that generates noise for several steps before outputting an image:\n\n```python\nimport gradio as gr\nimport numpy as np\nimport time\n\n# define core fn, which returns a generator {steps} times before returning the image\ndef fake_diffusion(steps):\n for _ in range(steps):\n time.sleep(1)\n image = np.random.random((600, 600, 3))\n yield image\n image = \"https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg\"\n yield image\n\n\ndemo = gr.Interface(fake_diffusion, inputs=gr.Slider(1, 10, 3), outputs=\"image\")\n\n# define queue - required for generators\ndemo.queue()\n\ndemo.launch()\n\n```\n\n\nNote that we've added a `time.sleep(1)` in the iterator to create an artificial pause between steps so that you are able to observe the steps of the iterator (in a real image generation model, this probably wouldn't be necessary).\n\nSupplying a generator into Gradio **requires** you to enable queuing in the underlying Interface or Blocks (see the queuing section above).\n\n## Progress Bars\n\nGradio supports the ability to create a custom Progress Bars so that you have customizability and control over the progress update that you show to the user. In order to enable this, simply add an argument to your method that has a default value of a `gr.Progress` instance. Then you can update the progress levels by calling this instance directly with a float between 0 and 1, or using the `tqdm()` method of the `Progress` instance to track progress over an iterable, as shown below. Queueing must be enabled for progress updates.\n\n```python\nimport gradio as gr\nimport time\n\ndef slowly_reverse(word, progress=gr.Progress()):\n progress(0, desc=\"Starting\")\n time.sleep(1)\n progress(0.05)\n new_string = \"\"\n for letter in progress.tqdm(word, desc=\"Reversing\"):\n time.sleep(0.25)\n new_string = letter + new_string\n return new_string\n\ndemo = gr.Interface(slowly_reverse, gr.Text(), gr.Text())\n\nif __name__ == \"__main__\":\n demo.queue(concurrency_count=10).launch()\n\n```\n\n\nIf you use the `tqdm` library, you can even report progress updates automatically from any `tqdm.tqdm` that already exists within your function by setting the default argument as `gr.Progress(track_tqdm=True)`!\n\n## Batch Functions\n\nGradio supports the ability to pass *batch* functions. Batch functions are just\nfunctions which take in a list of inputs and return a list of predictions.\n\nFor example, here is a batched function that takes in two lists of inputs (a list of\nwords and a list of ints), and returns a list of trimmed words as output:\n\n```py\nimport time\n\ndef trim_words(words, lens):\n trimmed_words = []\n time.sleep(5)\n for w, l in zip(words, lens):\n trimmed_words.append(w[:int(l)]) \n return [trimmed_words]\n```\n\nThe advantage of using batched functions is that if you enable queuing, the Gradio\nserver can automatically *batch* incoming requests and process them in parallel,\npotentially speeding up your demo. 
Here's what the Gradio code looks like (notice\nthe `batch=True` and `max_batch_size=16` -- both of these parameters can be passed\ninto event triggers or into the `Interface` class)\n\nWith `Interface`:\n\n```python\ndemo = gr.Interface(trim_words, [\"textbox\", \"number\"], [\"output\"], \n batch=True, max_batch_size=16)\ndemo.queue()\ndemo.launch()\n```\n\nWith `Blocks`:\n\n```py\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n with gr.Row():\n word = gr.Textbox(label=\"word\")\n leng = gr.Number(label=\"leng\")\n output = gr.Textbox(label=\"Output\")\n with gr.Row():\n run = gr.Button()\n\n event = run.click(trim_words, [word, leng], output, batch=True, max_batch_size=16)\n\ndemo.queue()\ndemo.launch()\n```\n\nIn the example above, 16 requests could be processed in parallel (for a total inference\ntime of 5 seconds), instead of each request being processed separately (for a total\ninference time of 80 seconds). Many Hugging Face `transformers` and `diffusers` models\nwork very naturally with Gradio's batch mode: here's [an example demo using diffusers to\ngenerate images in batches](https://github.com/gradio-app/gradio/blob/main/demo/diffusers_with_batching/run.py)\n\nNote: using batch functions with Gradio **requires** you to enable queuing in the underlying Interface or Blocks (see the queuing section above).\n\n\n## Colab Notebooks\n\n\nGradio is able to run anywhere you run Python, including local jupyter notebooks as well as collaborative notebooks, such as [Google Colab](https://colab.research.google.com/). In the case of local jupyter notebooks and Google Colab notbooks, Gradio runs on a local server which you can interact with in your browser. (Note: for Google Colab, this is accomplished by [service worker tunneling](https://github.com/tensorflow/tensorboard/blob/master/docs/design/colab_integration.md), which requires cookies to be enabled in your browser.) For other remote notebooks, Gradio will also run on a server, but you will need to use [SSH tunneling](https://coderwall.com/p/ohk6cg/remote-access-to-ipython-notebooks-via-ssh) to view the app in your local browser. Often a simpler options is to use Gradio's built-in public links, [discussed in the next Guide](https://gradio.app/guides/sharing-your-app/#sharing-demos). ", - "html": "

Key Features

\n\n

Let's go through some of the most popular features of Gradio! Here are Gradio's key features:

\n\n
    \n
  1. Adding example inputs
  2. Passing custom error messages
  3. Adding descriptive content
  4. Setting up flagging
  5. Preprocessing and postprocessing
  6. Styling demos
  7. Queuing users
  8. Iterative outputs
  9. Progress bars
  10. Batch functions
  11. Running on collaborative notebooks
\n\n

Example Inputs

\n\n

You can provide example data that a user can easily load into Interface. This can be helpful to demonstrate the types of inputs the model expects, as well as to provide a way to explore your dataset in conjunction with your model. To load example data, you can provide a nested list to the examples= keyword argument of the Interface constructor. Each sublist within the outer list represents a data sample, and each element within the sublist represents an input for each input component. The format of example data for each component is specified in the Docs.

\n\n
import gradio as gr\n\ndef calculator(num1, operation, num2):\n    if operation == \"add\":\n        return num1 + num2\n    elif operation == \"subtract\":\n        return num1 - num2\n    elif operation == \"multiply\":\n        return num1 * num2\n    elif operation == \"divide\":\n        if num2 == 0:\n            raise gr.Error(\"Cannot divide by zero!\")\n        return num1 / num2\n\ndemo = gr.Interface(\n    calculator,\n    [\n        \"number\", \n        gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n        \"number\"\n    ],\n    \"number\",\n    examples=[\n        [5, \"add\", 3],\n        [4, \"divide\", 2],\n        [-4, \"multiply\", 2.5],\n        [0, \"subtract\", 1.2],\n    ],\n    title=\"Toy Calculator\",\n    description=\"Here's a sample toy calculator. Allows you to calculate things like $2+2=4$\",\n)\ndemo.launch()\n\n
\n\n

\n\n

You can load a large dataset into the examples to browse and interact with the dataset through Gradio. The examples will be automatically paginated (you can configure this through the examples_per_page argument of Interface).
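Below is a minimal sketch of browsing a larger example set, reusing the calculator function from above; the generated example rows are made up purely for illustration:

```python
import gradio as gr

def calculator(num1, operation, num2):
    if operation == "add":
        return num1 + num2
    elif operation == "subtract":
        return num1 - num2
    elif operation == "multiply":
        return num1 * num2
    elif operation == "divide":
        return num1 / num2

# An illustrative, larger example set; only five rows are shown per page.
many_examples = [[n, "add", n + 1] for n in range(30)]

demo = gr.Interface(
    calculator,
    ["number", gr.Radio(["add", "subtract", "multiply", "divide"]), "number"],
    "number",
    examples=many_examples,
    examples_per_page=5,
)

if __name__ == "__main__":
    demo.launch()
```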

\n\n

Continue learning about examples in the More On Examples guide.

\n\n

Alerts

\n\n

You may wish to pass custom error messages to the user. To do so, raise a gr.Error(\"custom message\") to display an error message. If you try to divide by zero in the calculator demo above, a popup modal will display the custom error message. Learn more about Error in the docs.

\n\n

You can also issue gr.Warning(\"message\") and gr.Info(\"message\") by having them as standalone lines in your function, which will immediately display modals while continuing the execution of your function. Queueing needs to be enabled for this to work.

\n\n

Note below how the gr.Error has to be raised, while the gr.Warning and gr.Info are single lines.

\n\n
def start_process(name):\n    gr.Info(\"Starting process\")\n    if name is None:\n        gr.Warning(\"Name is empty\")\n    ...\n    if success == False:\n        raise gr.Error(\"Process failed\")\n
\n\n

Descriptive Content

\n\n

In the previous example, you may have noticed the title= and description= keyword arguments in the Interface constructor that help users understand your app.

\n\n

There are three arguments in the Interface constructor to specify where this content should go:

\n\n
    \n
  • title: which accepts text and can display it at the very top of interface, and also becomes the page title.
  • description: which accepts text, markdown or HTML and places it right under the title.
  • article: which also accepts text, markdown or HTML and places it below the interface.
\n\n

\"annotated\"

\n\n

If you're using the Blocks API instead, you can insert text, markdown, or HTML anywhere using the gr.Markdown(...) or gr.HTML(...) components, with descriptive content inside the Component constructor.

\n\n

Another useful keyword argument is label=, which is present in every Component. This modifies the label text at the top of each Component. You can also add the info= keyword argument to form elements like Textbox or Radio to provide further information on their usage.

\n\n
gr.Number(label='Age', info='In years, must be greater than 0')\n
\n\n

Flagging

\n\n

By default, an Interface will have a \"Flag\" button. When a user testing your Interface sees input with interesting output, such as erroneous or unexpected model behaviour, they can flag the input for you to review. Within the directory provided by the flagging_dir= argument to the Interface constructor, a CSV file will log the flagged inputs. If the interface involves file data, such as for Image and Audio components, folders will be created to store the flagged data as well.

\n\n

For example, with the calculator interface shown above, we would have the flagged data stored in the flagged directory shown below:

\n\n
+-- calculator.py\n+-- flagged/\n|   +-- logs.csv\n
\n\n

flagged/logs.csv

\n\n
num1,operation,num2,Output\n5,add,7,12\n6,subtract,1.5,4.5\n
\n\n

With the sepia interface shown earlier, we would have the flagged data stored in the flagged directory shown below:

\n\n
+-- sepia.py\n+-- flagged/\n|   +-- logs.csv\n|   +-- im/\n|   |   +-- 0.png\n|   |   +-- 1.png\n|   +-- Output/\n|   |   +-- 0.png\n|   |   +-- 1.png\n
\n\n

flagged/logs.csv

\n\n
im,Output\nim/0.png,Output/0.png\nim/1.png,Output/1.png\n
\n\n

If you wish for the user to provide a reason for flagging, you can pass a list of strings to the flagging_options argument of Interface. Users will have to select one of the strings when flagging, which will be saved as an additional column to the CSV.
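As a brief sketch (reusing the calculator function from above, with option strings chosen just for illustration), passing flagging_options might look like this:

```python
import gradio as gr

def calculator(num1, operation, num2):
    if operation == "add":
        return num1 + num2
    elif operation == "subtract":
        return num1 - num2
    elif operation == "multiply":
        return num1 * num2
    elif operation == "divide":
        return num1 / num2

demo = gr.Interface(
    calculator,
    ["number", gr.Radio(["add", "subtract", "multiply", "divide"]), "number"],
    "number",
    # Users must pick one of these reasons when flagging; the choice is
    # saved as an extra column in the flagged CSV log.
    flagging_options=["wrong result", "unexpected behaviour", "other"],
)

if __name__ == "__main__":
    demo.launch()
```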

\n\n

Preprocessing and Postprocessing

\n\n

\"\"

\n\n

As you've seen, Gradio includes components that can handle a variety of different data types, such as images, audio, and video. Most components can be used both as inputs or outputs.

\n\n

When a component is used as an input, Gradio automatically handles the preprocessing needed to convert the data from a type sent by the user's browser (such as a base64 representation of a webcam snapshot) to a form that can be accepted by your function (such as a numpy array).

\n\n

Similarly, when a component is used as an output, Gradio automatically handles the postprocessing needed to convert the data from what is returned by your function (such as a list of image paths) to a form that can be displayed in the user's browser (such as a Gallery of images in base64 format).

\n\n

You can control the preprocessing using the parameters when constructing the image component. For example, here if you instantiate the Image component with the following parameters, it will convert the image to the PIL type and reshape it to be (100, 100) no matter the original size that it was submitted as:

\n\n
img = gr.Image(shape=(100, 100), type=\"pil\")\n
\n\n

In contrast, here we keep the original size of the image, but invert the colors before converting it to a numpy array:

\n\n
img = gr.Image(invert_colors=True, type=\"numpy\")\n
\n\n

Postprocessing is a lot easier! Gradio automatically recognizes the format of the returned data (e.g. is the Image a numpy array or a str filepath?) and postprocesses it into a format that can be displayed by the browser.

\n\n

Take a look at the Docs to see all the preprocessing-related parameters for each Component.

\n\n

Styling

\n\n

Gradio themes are the easiest way to customize the look and feel of your app. You can choose from a variety of themes, or create your own. To do so, pass the theme= kwarg to the Interface constructor. For example:

\n\n
demo = gr.Interface(..., theme=gr.themes.Monochrome())\n
\n\n

Gradio comes with a set of prebuilt themes which you can load from gr.themes.*. You can extend these themes or create your own themes from scratch - see the Theming guide for more details.

\n\n

For additional styling ability, you can pass any CSS to your app using the css= kwarg.\nThe base class for the Gradio app is gradio-container, so here's an example that changes the background color of the Gradio app:

\n\n
with gr.Interface(css=\".gradio-container {background-color: red}\") as demo:\n    ...\n
\n\n

Some components can be additionally styled through the style() method. For example:

\n\n
img = gr.Image(\"lion.jpg\").style(height='24', rounded=False)\n
\n\n

Take a look at the Docs to see all the styling options for each Component.

\n\n

Queuing

\n\n

If your app expects heavy traffic, use the queue() method to control processing rate. This will queue up calls so only a certain number of requests are processed at a single time. Queueing uses websockets, which also prevent network timeouts, so you should use queueing if the inference time of your function is long (> 1min).

\n\n

With Interface:

\n\n
demo = gr.Interface(...).queue()\ndemo.launch()\n
\n\n

With Blocks:

\n\n
with gr.Blocks() as demo:\n    #...\ndemo.queue()\ndemo.launch()\n
\n\n

You can control the number of requests processed at a single time as such:

\n\n
demo.queue(concurrency_count=3)\n
\n\n

See the Docs on queueing on configuring other queuing parameters.

\n\n

To specify only certain functions for queueing in Blocks:

\n\n
with gr.Blocks() as demo2:\n    num1 = gr.Number()\n    num2 = gr.Number()\n    output = gr.Number()\n    gr.Button(\"Add\").click(\n        lambda a, b: a + b, [num1, num2], output)\n    gr.Button(\"Multiply\").click(\n        lambda a, b: a * b, [num1, num2], output, queue=True)\ndemo2.launch()\n
\n\n

Iterative Outputs

\n\n

In some cases, you may want to stream a sequence of outputs rather than show a single output at once. For example, you might have an image generation model and you want to show the image that is generated at each step, leading up to the final image. Or you might have a chatbot which streams its response one word at a time instead of returning it all at once.

\n\n

In such cases, you can supply a generator function into Gradio instead of a regular function. Creating generators in Python is very simple: instead of a single return value, a function should yield a series of values. Usually the yield statement is put in some kind of loop. Here's an example of a generator that simply counts up to a given number:

\n\n
def my_generator(x):\n    for i in range(x):\n        yield i\n
\n\n

You supply a generator into Gradio the same way as you would a regular function. For example, here's a (fake) image generation model that generates noise for several steps before outputting an image:

\n\n
import gradio as gr\nimport numpy as np\nimport time\n\n# define core fn, which returns a generator {steps} times before returning the image\ndef fake_diffusion(steps):\n    for _ in range(steps):\n        time.sleep(1)\n        image = np.random.random((600, 600, 3))\n        yield image\n    image = \"https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg\"\n    yield image\n\n\ndemo = gr.Interface(fake_diffusion, inputs=gr.Slider(1, 10, 3), outputs=\"image\")\n\n# define queue - required for generators\ndemo.queue()\n\ndemo.launch()\n\n
\n\n

\n\n

Note that we've added a time.sleep(1) in the iterator to create an artificial pause between steps so that you are able to observe the steps of the iterator (in a real image generation model, this probably wouldn't be necessary).

\n\n

Supplying a generator into Gradio requires you to enable queuing in the underlying Interface or Blocks (see the queuing section above).

\n\n

Progress Bars

\n\n

Gradio supports the ability to create custom progress bars so that you have control over the progress updates that you show to the user. In order to enable this, simply add an argument to your method that has a default value of a gr.Progress instance. Then you can update the progress levels by calling this instance directly with a float between 0 and 1, or using the tqdm() method of the Progress instance to track progress over an iterable, as shown below. Queueing must be enabled for progress updates.

\n\n
import gradio as gr\nimport time\n\ndef slowly_reverse(word, progress=gr.Progress()):\n    progress(0, desc=\"Starting\")\n    time.sleep(1)\n    progress(0.05)\n    new_string = \"\"\n    for letter in progress.tqdm(word, desc=\"Reversing\"):\n        time.sleep(0.25)\n        new_string = letter + new_string\n    return new_string\n\ndemo = gr.Interface(slowly_reverse, gr.Text(), gr.Text())\n\nif __name__ == \"__main__\":\n    demo.queue(concurrency_count=10).launch()\n\n
\n\n

\n\n

If you use the tqdm library, you can even report progress updates automatically from any tqdm.tqdm that already exists within your function by setting the default argument as gr.Progress(track_tqdm=True)!
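A rough sketch of what this can look like, assuming a function that already loops with an ordinary tqdm.tqdm (the function and timings here are only illustrative):

```python
import time

import gradio as gr
from tqdm import tqdm

def slowly_reverse(word, progress=gr.Progress(track_tqdm=True)):
    new_string = ""
    # This plain tqdm loop is detected automatically and mirrored
    # as a progress bar in the Gradio UI.
    for letter in tqdm(word, desc="Reversing"):
        time.sleep(0.25)
        new_string = letter + new_string
    return new_string

demo = gr.Interface(slowly_reverse, gr.Text(), gr.Text())

if __name__ == "__main__":
    demo.queue().launch()
```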

\n\n

Batch Functions

\n\n

Gradio supports the ability to pass batch functions. Batch functions are just\nfunctions which take in a list of inputs and return a list of predictions.

\n\n

For example, here is a batched function that takes in two lists of inputs (a list of\nwords and a list of ints), and returns a list of trimmed words as output:

\n\n
import time\n\ndef trim_words(words, lens):\n    trimmed_words = []\n    time.sleep(5)\n    for w, l in zip(words, lens):\n        trimmed_words.append(w[:int(l)])        \n    return [trimmed_words]\n
\n\n

The advantage of using batched functions is that if you enable queuing, the Gradio\nserver can automatically batch incoming requests and process them in parallel,\npotentially speeding up your demo. Here's what the Gradio code looks like (notice\nthe batch=True and max_batch_size=16 -- both of these parameters can be passed\ninto event triggers or into the Interface class)

\n\n

With Interface:

\n\n
demo = gr.Interface(trim_words, [\"textbox\", \"number\"], [\"output\"], \n                    batch=True, max_batch_size=16)\ndemo.queue()\ndemo.launch()\n
\n\n

With Blocks:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        word = gr.Textbox(label=\"word\")\n        leng = gr.Number(label=\"leng\")\n        output = gr.Textbox(label=\"Output\")\n    with gr.Row():\n        run = gr.Button()\n\n    event = run.click(trim_words, [word, leng], output, batch=True, max_batch_size=16)\n\ndemo.queue()\ndemo.launch()\n
\n\n

In the example above, 16 requests could be processed in parallel (for a total inference\ntime of 5 seconds), instead of each request being processed separately (for a total\ninference time of 80 seconds). Many Hugging Face transformers and diffusers models\nwork very naturally with Gradio's batch mode: here's an example demo using diffusers to\ngenerate images in batches

\n\n

Note: using batch functions with Gradio requires you to enable queuing in the underlying Interface or Blocks (see the queuing section above).

\n\n

Colab Notebooks

\n\n

Gradio is able to run anywhere you run Python, including local Jupyter notebooks as well as collaborative notebooks, such as Google Colab. In the case of local Jupyter notebooks and Google Colab notebooks, Gradio runs on a local server which you can interact with in your browser. (Note: for Google Colab, this is accomplished by service worker tunneling, which requires cookies to be enabled in your browser.) For other remote notebooks, Gradio will also run on a server, but you will need to use SSH tunneling to view the app in your local browser. Often a simpler option is to use Gradio's built-in public links, discussed in the next Guide.

\n", - "tags": [], - "spaces": [], - "url": "/guides/key-features/", - "contributor": null - } - ], - "preprocessing": "this component does *not* accept input.", - "postprocessing": "expects a valid str that can be rendered as Markdown.", - "parent": "gradio", - "prev_obj": "LinePlot", - "next_obj": "Model3D" - }, - "model3d": { - "class": null, - "name": "Model3D", - "description": "Component allows users to upload or view 3D Model files (.obj, .glb, or .gltf).
", - "tags": { - "preprocessing": "This component passes the uploaded file as a {str}filepath.", - "postprocessing": "expects function to return a {str} or {pathlib.Path} filepath of type (.obj, glb, or .gltf)", - "demos": "model3D", - "guides": "how-to-use-3D-model-component" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "value", - "annotation": "str | Callable | None", - "doc": "path to (.obj, glb, or .gltf) file to show in model3D viewer. If callable, the function will be called whenever the app loads to set the initial value of the component.", - "default": "None" - }, - { - "name": "clear_color", - "annotation": "list[float] | None", - "doc": "background color of scene", - "default": "None" - }, - { - "name": "label", - "annotation": "str | None", - "doc": "component name in interface.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", - "default": "None" - }, - { - "name": "show_label", - "annotation": "bool | None", - "doc": "if True, will display label.", - "default": "None" - }, - { - "name": "container", - "annotation": "bool", - "doc": "If True, will place the component in a container - providing some extra padding around the border.", - "default": "True" - }, - { - "name": "scale", - "annotation": "int | None", - "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", - "default": "None" - }, - { - "name": "min_width", - "annotation": "int", - "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", - "default": "160" - }, - { - "name": "visible", - "annotation": "bool", - "doc": "If False, component will be hidden.", - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "elem_classes", - "annotation": "list[str] | str | None", - "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [ - { - "fn": null, - "name": "change", - "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. 
Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. 
Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Model3D" - }, - { - "fn": null, - "name": "edit", - "description": "This listener is triggered when the user edits the component (e.g. image) using the built-in editor. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Model3D" - }, - { - "fn": null, - "name": "clear", - "description": "This listener is triggered when the user clears the component (e.g. image or audio) using the X button for the component. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Model3D" - }, - { - "fn": null, - "name": "upload", - "description": "This listener is triggered when the user uploads a file into the component (e.g. when the user uploads a video into a video component). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." 
- }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Model3D" - } - ], - "string_shortcuts": [["Model3D", "model3d", "Uses default values"]], - "demos": [ - [ - "model3D", - "import gradio as gr\nimport os\n\n\ndef load_mesh(mesh_file_name):\n return mesh_file_name\n\n\ndemo = gr.Interface(\n fn=load_mesh,\n inputs=gr.Model3D(),\n outputs=gr.Model3D(\n clear_color=[0.0, 0.0, 0.0, 0.0], label=\"3D Model\"),\n examples=[\n [os.path.join(os.path.dirname(__file__), \"files/Bunny.obj\")],\n [os.path.join(os.path.dirname(__file__), \"files/Duck.glb\")],\n [os.path.join(os.path.dirname(__file__), \"files/Fox.gltf\")],\n [os.path.join(os.path.dirname(__file__), \"files/face.obj\")],\n ],\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ] - ], - "guides": [ - { - "name": "how-to-use-3D-model-component", - "category": "other-tutorials", - "pretty_category": "Other Tutorials", - "guide_index": null, - "absolute_index": 38, - "pretty_name": "How To Use 3D Model Component", - "content": "# How to Use the 3D Model Component\n\n\n\n\n## Introduction\n\n3D models are becoming more popular in machine learning and make for some of the most fun demos to experiment with. Using `gradio`, you can easily build a demo of your 3D image model and share it with anyone. The Gradio 3D Model component accepts 3 file types including: *.obj*, *.glb*, & *.gltf*.\n\nThis guide will show you how to build a demo for your 3D image model in a few lines of code; like the one below. Play around with 3D object by clicking around, dragging and zooming:\n\n \n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](https://gradio.app/guides/quickstart).\n\n\n## Taking a Look at the Code\n\nLet's take a look at how to create the minimal interface above. The prediction function in this case will just return the original 3D model mesh, but you can change this function to run inference on your machine learning model. We'll take a look at more complex examples below.\n\n```python\nimport gradio as gr\n\ndef load_mesh(mesh_file_name):\n return mesh_file_name\n\ndemo = gr.Interface(\n fn=load_mesh,\n inputs=gr.Model3D(),\n outputs=gr.Model3D(clear_color=[0.0, 0.0, 0.0, 0.0], label=\"3D Model\"),\n examples=[\n [\"files/Bunny.obj\"],\n [\"files/Duck.glb\"],\n [\"files/Fox.gltf\"],\n [\"files/face.obj\"],\n ],\n cache_examples=True,\n)\n\ndemo.launch()\n```\n\nLet's break down the code above:\n\n`load_mesh`: This is our 'prediction' function and for simplicity, this function will take in the 3D model mesh and return it.\n\nCreating the Interface:\n\n* `fn`: the prediction function that is used when the user clicks submit. In our case this is the `load_mesh` function.\n* `inputs`: create a model3D input component. The input expects an uploaded file as a {str} filepath.\n* `outputs`: create a model3D output component. The output component also expects a file as a {str} filepath.\n * `clear_color`: this is the background color of the 3D model canvas. Expects RGBa values.\n * `label`: the label that appears on the top left of the component.\n* `examples`: list of 3D model files. The 3D model component can accept *.obj*, *.glb*, & *.gltf* file types.\n* `cache_examples`: saves the predicted output for the examples, to save time on inference.\n\n\n## Exploring mode complex Model3D Demos:\n\nBelow is a demo that uses the DPT model to predict the depth of an image and then uses 3D Point Cloud to create a 3D object. 
Take a look at the [app.py](https://huggingface.co/spaces/radames/dpt-depth-estimation-3d-obj/blob/main/app.py) file for a peek into the code and the model prediction function.\n \n\nBelow is a demo that uses the PIFu model to convert an image of a clothed human into a 3D digitized model. Take a look at the [spaces.py](https://huggingface.co/spaces/radames/PIFu-Clothed-Human-Digitization/blob/main/PIFu/spaces.py) file for a peek into the code and the model prediction function.\n\n \n\n----------\n\nAnd you're done! That's all the code you need to build an interface for your Model3D model. Here are some references that you may find useful:\n\n* Gradio's [\"Getting Started\" guide](https://gradio.app/getting_started/)\n* The first [3D Model Demo](https://huggingface.co/spaces/dawood/Model3D) and [complete code](https://huggingface.co/spaces/dawood/Model3D/tree/main) (on Hugging Face Spaces)\n", - "html": "

How to Use the 3D Model Component

\n\n

Introduction

\n\n

3D models are becoming more popular in machine learning and make for some of the most fun demos to experiment with. Using gradio, you can easily build a demo of your 3D image model and share it with anyone. The Gradio 3D Model component accepts three file types: .obj, .glb, & .gltf.

\n\n

This guide will show you how to build a demo for your 3D image model in a few lines of code, like the one below. Play around with the 3D object by clicking, dragging, and zooming:

\n\n

\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed.

\n\n

Taking a Look at the Code

\n\n

Let's take a look at how to create the minimal interface above. The prediction function in this case will just return the original 3D model mesh, but you can change this function to run inference on your machine learning model. We'll take a look at more complex examples below.

\n\n
import gradio as gr\n\ndef load_mesh(mesh_file_name):\n    return mesh_file_name\n\ndemo = gr.Interface(\n    fn=load_mesh,\n    inputs=gr.Model3D(),\n    outputs=gr.Model3D(clear_color=[0.0, 0.0, 0.0, 0.0],  label=\"3D Model\"),\n    examples=[\n        [\"files/Bunny.obj\"],\n        [\"files/Duck.glb\"],\n        [\"files/Fox.gltf\"],\n        [\"files/face.obj\"],\n    ],\n    cache_examples=True,\n)\n\ndemo.launch()\n
\n\n

Let's break down the code above:

\n\n

load_mesh: This is our 'prediction' function; for simplicity, it takes in the 3D model mesh and returns it unchanged.

\n\n

Creating the Interface:

\n\n
    \n
  • fn: the prediction function that is used when the user clicks submit. In our case this is the load_mesh function.
  • \n
  • inputs: create a model3D input component. The input expects an uploaded file as a {str} filepath.
  • \n
  • outputs: create a model3D output component. The output component also expects a file as a {str} filepath.\n
      \n
    • clear_color: this is the background color of the 3D model canvas. Expects RGBa values.
    • \n
    • label: the label that appears on the top left of the component.
    • \n
  • \n
  • examples: list of 3D model files. The 3D model component can accept .obj, .glb, & .gltf file types.
  • \n
  • cache_examples: saves the predicted output for the examples, to save time on inference.
  • \n
\n\n

Exploring more complex Model3D Demos:

\n\n

Below is a demo that uses the DPT model to predict the depth of an image and then uses a 3D point cloud to create a 3D object. Take a look at the app.py file for a peek into the code and the model prediction function.\n

\n\n

Below is a demo that uses the PIFu model to convert an image of a clothed human into a 3D digitized model. Take a look at the spaces.py file for a peek into the code and the model prediction function.

\n\n

\n\n
\n\n

And you're done! That's all the code you need to build an interface for your Model3D model. Here are some references that you may find useful:

\n\n\n", - "tags": ["VISION", "IMAGE"], - "spaces": [ - "https://huggingface.co/spaces/dawood/Model3D", - "https://huggingface.co/spaces/radames/PIFu-Clothed-Human-Digitization", - "https://huggingface.co/spaces/radames/dpt-depth-estimation-3d-obj" - ], - "url": "/guides/how-to-use-3D-model-component/", - "contributor": null - } - ], - "preprocessing": "This component passes the uploaded file as a strfilepath.", - "postprocessing": "expects function to return a str or pathlib.Path filepath of type (.obj, glb, or .gltf)", - "parent": "gradio", - "prev_obj": "Markdown", - "next_obj": "Number" - }, - "number": { - "class": null, - "name": "Number", - "description": "Creates a numeric field for user to enter numbers as input or display numeric output.
", - "tags": { - "preprocessing": "passes field value as a {float} or {int} into the function, depending on `precision`.", - "postprocessing": "expects an {int} or {float} returned from the function and sets field value to it.", - "examples-format": "a {float} or {int} representing the number's value.", - "demos": "tax_calculator, titanic_survival, blocks_simple_squares" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "value", - "annotation": "float | Callable | None", - "doc": "default value. If callable, the function will be called whenever the app loads to set the initial value of the component.", - "default": "None" - }, - { - "name": "label", - "annotation": "str | None", - "doc": "component name in interface.", - "default": "None" - }, - { - "name": "info", - "annotation": "str | None", - "doc": "additional component description.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", - "default": "None" - }, - { - "name": "show_label", - "annotation": "bool | None", - "doc": "if True, will display label.", - "default": "None" - }, - { - "name": "container", - "annotation": "bool", - "doc": "If True, will place the component in a container - providing some extra padding around the border.", - "default": "True" - }, - { - "name": "scale", - "annotation": "int | None", - "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", - "default": "None" - }, - { - "name": "min_width", - "annotation": "int", - "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", - "default": "160" - }, - { - "name": "interactive", - "annotation": "bool | None", - "doc": "if True, will be editable; if False, editing will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.", - "default": "None" - }, - { - "name": "visible", - "annotation": "bool", - "doc": "If False, component will be hidden.", - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "elem_classes", - "annotation": "list[str] | str | None", - "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "precision", - "annotation": "int | None", - "doc": "Precision to round input/output to. If set to 0, will round to nearest integer and convert type to int. If None, no rounding happens.", - "default": "None" - }, - { - "name": "minimum", - "annotation": "float | None", - "doc": "Minimum value. Only applied when component is used as an input. If a user provides a smaller value, a gr.Error exception is raised by the backend.", - "default": "None" - }, - { - "name": "maximum", - "annotation": "float | None", - "doc": "Maximum value. 
Only applied when component is used as an input. If a user provides a larger value, a gr.Error exception is raised by the backend.", - "default": "None" - }, - { - "name": "step", - "annotation": "float", - "doc": "The interval between allowed numbers in the component. Can be used along with optional parameters `minimum` and `maximum` to create a range of legal values starting from `minimum` and incrementing according to this parameter.", - "default": "1" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [ - { - "fn": null, - "name": "change", - "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Number" - }, - { - "fn": null, - "name": "input", - "description": "This listener is triggered when the user changes the value of the component. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Number" - }, - { - "fn": null, - "name": "submit", - "description": "This listener is triggered when the user presses the Enter key while the component (e.g. a textbox) is focused. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." 
- }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Number" - }, - { - "fn": null, - "name": "focus", - "description": "This listener is triggered when the component is focused (e.g. when the user clicks inside a textbox). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Number" - }, - { - "fn": null, - "name": "blur", - "description": "This listener is triggered when the component's is unfocused/blurred (e.g. when the user clicks outside of a textbox). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. 
If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Number" - } - ], - "string_shortcuts": [["Number", "number", "Uses default values"]], - "demos": [ - [ - "tax_calculator", - "import gradio as gr\n\ndef tax_calculator(income, marital_status, assets):\n tax_brackets = [(10, 0), (25, 8), (60, 12), (120, 20), (250, 30)]\n total_deductible = sum(assets[\"Cost\"])\n taxable_income = income - total_deductible\n\n total_tax = 0\n for bracket, rate in tax_brackets:\n if taxable_income > bracket:\n total_tax += (taxable_income - bracket) * rate / 100\n\n if marital_status == \"Married\":\n total_tax *= 0.75\n elif marital_status == \"Divorced\":\n total_tax *= 0.8\n\n return round(total_tax)\n\ndemo = gr.Interface(\n tax_calculator,\n [\n \"number\",\n gr.Radio([\"Single\", \"Married\", \"Divorced\"]),\n gr.Dataframe(\n headers=[\"Item\", \"Cost\"],\n datatype=[\"str\", \"number\"],\n label=\"Assets Purchased this Year\",\n ),\n ],\n \"number\",\n examples=[\n [10000, \"Married\", [[\"Suit\", 5000], [\"Laptop\", 800], [\"Car\", 1800]]],\n [80000, \"Single\", [[\"Suit\", 800], [\"Watch\", 1800], [\"Car\", 800]]],\n ],\n)\n\ndemo.launch()\n" - ], - [ - "titanic_survival", - "import os\n\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\n\nimport gradio as gr\n\ncurrent_dir = os.path.dirname(os.path.realpath(__file__))\ndata = pd.read_csv(os.path.join(current_dir, \"files/titanic.csv\"))\n\n\ndef encode_age(df):\n df.Age = df.Age.fillna(-0.5)\n bins = (-1, 0, 5, 12, 18, 25, 35, 60, 120)\n categories = pd.cut(df.Age, bins, labels=False)\n df.Age = categories\n return df\n\n\ndef encode_fare(df):\n df.Fare = df.Fare.fillna(-0.5)\n bins = (-1, 0, 8, 15, 31, 1000)\n categories = pd.cut(df.Fare, bins, labels=False)\n df.Fare = categories\n return df\n\n\ndef encode_df(df):\n df = encode_age(df)\n df = encode_fare(df)\n sex_mapping = {\"male\": 0, \"female\": 1}\n df = df.replace({\"Sex\": sex_mapping})\n embark_mapping = {\"S\": 1, \"C\": 2, \"Q\": 3}\n df = df.replace({\"Embarked\": embark_mapping})\n df.Embarked = df.Embarked.fillna(0)\n df[\"Company\"] = 0\n df.loc[(df[\"SibSp\"] > 0), \"Company\"] = 1\n df.loc[(df[\"Parch\"] > 0), \"Company\"] = 2\n df.loc[(df[\"SibSp\"] > 0) & (df[\"Parch\"] > 0), \"Company\"] = 3\n df = df[\n [\n \"PassengerId\",\n \"Pclass\",\n \"Sex\",\n \"Age\",\n \"Fare\",\n \"Embarked\",\n \"Company\",\n \"Survived\",\n ]\n ]\n return df\n\n\ntrain = encode_df(data)\n\nX_all = train.drop([\"Survived\", \"PassengerId\"], axis=1)\ny_all = train[\"Survived\"]\n\nnum_test = 0.20\nX_train, X_test, y_train, y_test = train_test_split(\n X_all, y_all, test_size=num_test, random_state=23\n)\n\nclf = RandomForestClassifier()\nclf.fit(X_train, y_train)\npredictions = clf.predict(X_test)\n\n\ndef predict_survival(passenger_class, is_male, age, company, fare, embark_point):\n if passenger_class is None or embark_point is None:\n return None\n df = pd.DataFrame.from_dict(\n {\n \"Pclass\": [passenger_class + 1],\n \"Sex\": [0 if is_male else 1],\n \"Age\": [age],\n \"Fare\": [fare],\n \"Embarked\": [embark_point + 1],\n \"Company\": [\n (1 if \"Sibling\" in company else 0) + (2 if \"Child\" in company else 0)\n ]\n }\n )\n df = encode_age(df)\n df = encode_fare(df)\n pred = clf.predict_proba(df)[0]\n return {\"Perishes\": float(pred[0]), \"Survives\": float(pred[1])}\n\n\ndemo = gr.Interface(\n predict_survival,\n [\n 
gr.Dropdown([\"first\", \"second\", \"third\"], type=\"index\"),\n \"checkbox\",\n gr.Slider(0, 80, value=25),\n gr.CheckboxGroup([\"Sibling\", \"Child\"], label=\"Travelling with (select all)\"),\n gr.Number(value=20),\n gr.Radio([\"S\", \"C\", \"Q\"], type=\"index\"),\n ],\n \"label\",\n examples=[\n [\"first\", True, 30, [], 50, \"S\"],\n [\"second\", False, 40, [\"Sibling\", \"Child\"], 10, \"Q\"],\n [\"third\", True, 30, [\"Child\"], 20, \"S\"],\n ],\n interpretation=\"default\",\n live=True,\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ], - [ - "blocks_simple_squares", - "import gradio as gr\n\ndemo = gr.Blocks(css=\"\"\"#btn {color: red} .abc {font-family: \"Comic Sans MS\", \"Comic Sans\", cursive !important}\"\"\")\n\nwith demo:\n default_json = {\"a\": \"a\"}\n\n num = gr.State(value=0)\n squared = gr.Number(value=0)\n btn = gr.Button(\"Next Square\", elem_id=\"btn\", elem_classes=[\"abc\", \"def\"])\n\n stats = gr.State(value=default_json)\n table = gr.JSON()\n\n def increase(var, stats_history):\n var += 1\n stats_history[str(var)] = var**2\n return var, var**2, stats_history, stats_history\n\n btn.click(increase, [num, stats], [num, squared, stats, table])\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ] - ], - "preprocessing": "passes field value as a float or int into the function, depending on `precision`.", - "postprocessing": "expects an int or float returned from the function and sets field value to it.", - "examples-format": "a float or int representing the number's value.", - "parent": "gradio", - "prev_obj": "Model3D", - "next_obj": "Plot" - }, - "plot": { - "class": null, - "name": "Plot", - "description": "Used to display various kinds of plots (matplotlib, plotly, or bokeh are supported)
", - "tags": { - "preprocessing": "this component does *not* accept input.", - "postprocessing": "expects either a {matplotlib.figure.Figure}, a {plotly.graph_objects._figure.Figure}, or a {dict} corresponding to a bokeh plot (json_item format)", - "demos": "altair_plot, outbreak_forecast, blocks_kinematics, stock_forecast, map_airbnb", - "guides": "plot-component-for-maps" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "value", - "annotation": "Callable | None | pd.DataFrame", - "doc": "Optionally, supply a default plot object to display, must be a matplotlib, plotly, altair, or bokeh figure, or a callable. If callable, the function will be called whenever the app loads to set the initial value of the component.", - "default": "None" - }, - { - "name": "label", - "annotation": "str | None", - "doc": "component name in interface.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", - "default": "None" - }, - { - "name": "show_label", - "annotation": "bool | None", - "doc": "if True, will display label.", - "default": "None" - }, - { - "name": "container", - "annotation": "bool", - "doc": "If True, will place the component in a container - providing some extra padding around the border.", - "default": "True" - }, - { - "name": "scale", - "annotation": "int | None", - "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", - "default": "None" - }, - { - "name": "min_width", - "annotation": "int", - "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", - "default": "160" - }, - { - "name": "visible", - "annotation": "bool", - "doc": "If False, component will be hidden.", - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "elem_classes", - "annotation": "list[str] | str | None", - "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [ - { - "fn": null, - "name": "change", - "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. 
Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. 
Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Plot" - }, - { - "fn": null, - "name": "clear", - "description": "This listener is triggered when the user clears the component (e.g. image or audio) using the X button for the component. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Plot" - } - ], - "string_shortcuts": [["Plot", "plot", "Uses default values"]], - "demos": [ - [ - "altair_plot", - "import altair as alt\nimport gradio as gr\nimport numpy as np\nimport pandas as pd\nfrom vega_datasets import data\n\n\ndef make_plot(plot_type):\n if plot_type == \"scatter_plot\":\n cars = data.cars()\n return alt.Chart(cars).mark_point().encode(\n x='Horsepower',\n y='Miles_per_Gallon',\n color='Origin',\n )\n elif plot_type == \"heatmap\":\n # Compute x^2 + y^2 across a 2D grid\n x, y = np.meshgrid(range(-5, 5), range(-5, 5))\n z = x ** 2 + y ** 2\n\n # Convert this grid to columnar data expected by Altair\n source = pd.DataFrame({'x': x.ravel(),\n 'y': y.ravel(),\n 'z': z.ravel()})\n return alt.Chart(source).mark_rect().encode(\n x='x:O',\n y='y:O',\n color='z:Q'\n )\n elif plot_type == \"us_map\":\n states = alt.topo_feature(data.us_10m.url, 'states')\n source = data.income.url\n\n return alt.Chart(source).mark_geoshape().encode(\n shape='geo:G',\n color='pct:Q',\n tooltip=['name:N', 'pct:Q'],\n facet=alt.Facet('group:N', columns=2),\n ).transform_lookup(\n lookup='id',\n from_=alt.LookupData(data=states, key='id'),\n as_='geo'\n ).properties(\n width=300,\n height=175,\n ).project(\n type='albersUsa'\n )\n elif plot_type == \"interactive_barplot\":\n source = data.movies.url\n\n pts = alt.selection(type=\"single\", encodings=['x'])\n\n rect = alt.Chart(data.movies.url).mark_rect().encode(\n alt.X('IMDB_Rating:Q', bin=True),\n alt.Y('Rotten_Tomatoes_Rating:Q', bin=True),\n alt.Color('count()',\n scale=alt.Scale(scheme='greenblue'),\n legend=alt.Legend(title='Total Records')\n )\n )\n\n circ = rect.mark_point().encode(\n alt.ColorValue('grey'),\n alt.Size('count()',\n legend=alt.Legend(title='Records in Selection')\n )\n ).transform_filter(\n pts\n )\n\n bar = alt.Chart(source).mark_bar().encode(\n x='Major_Genre:N',\n y='count()',\n color=alt.condition(pts, 
alt.ColorValue(\"steelblue\"), alt.ColorValue(\"grey\"))\n ).properties(\n width=550,\n height=200\n ).add_selection(pts)\n\n plot = alt.vconcat(\n rect + circ,\n bar\n ).resolve_legend(\n color=\"independent\",\n size=\"independent\"\n )\n return plot\n elif plot_type == \"radial\":\n source = pd.DataFrame({\"values\": [12, 23, 47, 6, 52, 19]})\n\n base = alt.Chart(source).encode(\n theta=alt.Theta(\"values:Q\", stack=True),\n radius=alt.Radius(\"values\", scale=alt.Scale(type=\"sqrt\", zero=True, rangeMin=20)),\n color=\"values:N\",\n )\n\n c1 = base.mark_arc(innerRadius=20, stroke=\"#fff\")\n\n c2 = base.mark_text(radiusOffset=10).encode(text=\"values:Q\")\n\n return c1 + c2\n elif plot_type == \"multiline\":\n source = data.stocks()\n\n highlight = alt.selection(type='single', on='mouseover',\n fields=['symbol'], nearest=True)\n\n base = alt.Chart(source).encode(\n x='date:T',\n y='price:Q',\n color='symbol:N'\n )\n\n points = base.mark_circle().encode(\n opacity=alt.value(0)\n ).add_selection(\n highlight\n ).properties(\n width=600\n )\n\n lines = base.mark_line().encode(\n size=alt.condition(~highlight, alt.value(1), alt.value(3))\n )\n\n return points + lines\n\n\nwith gr.Blocks() as demo:\n button = gr.Radio(label=\"Plot type\",\n choices=['scatter_plot', 'heatmap', 'us_map',\n 'interactive_barplot', \"radial\", \"multiline\"], value='scatter_plot')\n plot = gr.Plot(label=\"Plot\")\n button.change(make_plot, inputs=button, outputs=[plot])\n demo.load(make_plot, inputs=[button], outputs=[plot])\n\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ], - [ - "outbreak_forecast", - "import altair\n\nimport gradio as gr\nfrom math import sqrt\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport plotly.express as px\nimport pandas as pd\n\n\ndef outbreak(plot_type, r, month, countries, social_distancing):\n months = [\"January\", \"February\", \"March\", \"April\", \"May\"]\n m = months.index(month)\n start_day = 30 * m\n final_day = 30 * (m + 1)\n x = np.arange(start_day, final_day + 1)\n pop_count = {\"USA\": 350, \"Canada\": 40, \"Mexico\": 300, \"UK\": 120}\n if social_distancing:\n r = sqrt(r)\n df = pd.DataFrame({\"day\": x})\n for country in countries:\n df[country] = x ** (r) * (pop_count[country] + 1)\n\n if plot_type == \"Matplotlib\":\n fig = plt.figure()\n plt.plot(df[\"day\"], df[countries].to_numpy())\n plt.title(\"Outbreak in \" + month)\n plt.ylabel(\"Cases\")\n plt.xlabel(\"Days since Day 0\")\n plt.legend(countries)\n return fig\n elif plot_type == \"Plotly\":\n fig = px.line(df, x=\"day\", y=countries)\n fig.update_layout(\n title=\"Outbreak in \" + month,\n xaxis_title=\"Cases\",\n yaxis_title=\"Days Since Day 0\",\n )\n return fig\n elif plot_type == \"Altair\":\n df = df.melt(id_vars=\"day\").rename(columns={\"variable\": \"country\"})\n fig = altair.Chart(df).mark_line().encode(x=\"day\", y='value', color='country')\n return fig\n else:\n raise ValueError(\"A plot type must be selected\")\n\n\ninputs = [\n gr.Dropdown([\"Matplotlib\", \"Plotly\", \"Altair\"], label=\"Plot Type\"),\n gr.Slider(1, 4, 3.2, label=\"R\"),\n gr.Dropdown([\"January\", \"February\", \"March\", \"April\", \"May\"], label=\"Month\"),\n gr.CheckboxGroup(\n [\"USA\", \"Canada\", \"Mexico\", \"UK\"], label=\"Countries\", value=[\"USA\", \"Canada\"]\n ),\n gr.Checkbox(label=\"Social Distancing?\"),\n]\noutputs = gr.Plot()\n\ndemo = gr.Interface(\n fn=outbreak,\n inputs=inputs,\n outputs=outputs,\n examples=[\n [\"Matplotlib\", 2, \"March\", [\"Mexico\", \"UK\"], True],\n [\"Altair\", 
2, \"March\", [\"Mexico\", \"Canada\"], True],\n [\"Plotly\", 3.6, \"February\", [\"Canada\", \"Mexico\", \"UK\"], False],\n ],\n cache_examples=True,\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n\n\n\n" - ], - [ - "blocks_kinematics", - "import pandas as pd\nimport numpy as np\n\nimport gradio as gr\n\n\ndef plot(v, a):\n g = 9.81\n theta = a / 180 * 3.14\n tmax = ((2 * v) * np.sin(theta)) / g\n timemat = tmax * np.linspace(0, 1, 40)\n\n x = (v * timemat) * np.cos(theta)\n y = ((v * timemat) * np.sin(theta)) - ((0.5 * g) * (timemat**2))\n df = pd.DataFrame({\"x\": x, \"y\": y})\n return df\n\n\ndemo = gr.Blocks()\n\nwith demo:\n gr.Markdown(\n r\"Let's do some kinematics! Choose the speed and angle to see the trajectory. Remember that the range $R = v_0^2 \\cdot \\frac{\\sin(2\\theta)}{g}$\"\n )\n\n with gr.Row():\n speed = gr.Slider(1, 30, 25, label=\"Speed\")\n angle = gr.Slider(0, 90, 45, label=\"Angle\")\n output = gr.LinePlot(\n x=\"x\",\n y=\"y\",\n overlay_point=True,\n tooltip=[\"x\", \"y\"],\n x_lim=[0, 100],\n y_lim=[0, 60],\n width=350,\n height=300,\n )\n btn = gr.Button(value=\"Run\")\n btn.click(plot, [speed, angle], output)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ], - [ - "stock_forecast", - "import matplotlib.pyplot as plt\nimport numpy as np\n\nimport gradio as gr\n\n\ndef plot_forecast(final_year, companies, noise, show_legend, point_style):\n start_year = 2020\n x = np.arange(start_year, final_year + 1)\n year_count = x.shape[0]\n plt_format = ({\"cross\": \"X\", \"line\": \"-\", \"circle\": \"o--\"})[point_style]\n fig = plt.figure()\n ax = fig.add_subplot(111)\n for i, company in enumerate(companies):\n series = np.arange(0, year_count, dtype=float)\n series = series**2 * (i + 1)\n series += np.random.rand(year_count) * noise\n ax.plot(x, series, plt_format)\n if show_legend:\n plt.legend(companies)\n return fig\n\n\ndemo = gr.Interface(\n plot_forecast,\n [\n gr.Radio([2025, 2030, 2035, 2040], label=\"Project to:\"),\n gr.CheckboxGroup([\"Google\", \"Microsoft\", \"Gradio\"], label=\"Company Selection\"),\n gr.Slider(1, 100, label=\"Noise Level\"),\n gr.Checkbox(label=\"Show Legend\"),\n gr.Dropdown([\"cross\", \"line\", \"circle\"], label=\"Style\"),\n ],\n gr.Plot(label=\"forecast\"),\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ], - [ - "map_airbnb", - "import gradio as gr\nimport plotly.graph_objects as go\nfrom datasets import load_dataset\n\ndataset = load_dataset(\"gradio/NYC-Airbnb-Open-Data\", split=\"train\")\ndf = dataset.to_pandas()\n\ndef filter_map(min_price, max_price, boroughs):\n\n filtered_df = df[(df['neighbourhood_group'].isin(boroughs)) & \n (df['price'] > min_price) & (df['price'] < max_price)]\n names = filtered_df[\"name\"].tolist()\n prices = filtered_df[\"price\"].tolist()\n text_list = [(names[i], prices[i]) for i in range(0, len(names))]\n fig = go.Figure(go.Scattermapbox(\n customdata=text_list,\n lat=filtered_df['latitude'].tolist(),\n lon=filtered_df['longitude'].tolist(),\n mode='markers',\n marker=go.scattermapbox.Marker(\n size=6\n ),\n hoverinfo=\"text\",\n hovertemplate='Name: %{customdata[0]}
Price: $%{customdata[1]}'\n ))\n\n fig.update_layout(\n mapbox_style=\"open-street-map\",\n hovermode='closest',\n mapbox=dict(\n bearing=0,\n center=go.layout.mapbox.Center(\n lat=40.67,\n lon=-73.90\n ),\n pitch=0,\n zoom=9\n ),\n )\n\n return fig\n\nwith gr.Blocks() as demo:\n with gr.Column():\n with gr.Row():\n min_price = gr.Number(value=250, label=\"Minimum Price\")\n max_price = gr.Number(value=1000, label=\"Maximum Price\")\n boroughs = gr.CheckboxGroup(choices=[\"Queens\", \"Brooklyn\", \"Manhattan\", \"Bronx\", \"Staten Island\"], value=[\"Queens\", \"Brooklyn\"], label=\"Select Boroughs:\")\n btn = gr.Button(value=\"Update Filter\")\n map = gr.Plot().style()\n demo.load(filter_map, [min_price, max_price, boroughs], map)\n btn.click(filter_map, [min_price, max_price, boroughs], map)\n\nif __name__ == \"__main__\":\n demo.launch()" - ] - ], - "guides": [ - { - "name": "plot-component-for-maps", - "category": "tabular-data-science-and-plots", - "pretty_category": "Tabular Data Science And Plots", - "guide_index": null, - "absolute_index": 27, - "pretty_name": "Plot Component For Maps", - "content": "# How to Use the Plot Component for Maps\n\n\n\n## Introduction\n\nThis guide explains how you can use Gradio to plot geographical data on a map using the `gradio.Plot` component. The Gradio `Plot` component works with Matplotlib, Bokeh and Plotly. Plotly is what we will be working with in this guide. Plotly allows developers to easily create all sorts of maps with their geographical data. Take a look [here](https://plotly.com/python/maps/) for some examples.\n\n## Overview\n\nWe will be using the New York City Airbnb dataset, which is hosted on kaggle [here](https://www.kaggle.com/datasets/dgomonov/new-york-city-airbnb-open-data). I've uploaded it to the Hugging Face Hub as a dataset [here](https://huggingface.co/datasets/gradio/NYC-Airbnb-Open-Data) for easier use and download. Using this data we will plot Airbnb locations on a map output and allow filtering based on price and location. Below is the demo that we will be building. \u26a1\ufe0f\n\n\n\n## Step 1 - Loading CSV data \ud83d\udcbe\n\nLet's start by loading the Airbnb NYC data from the Hugging Face Hub.\n\n```python\nfrom datasets import load_dataset\n\ndataset = load_dataset(\"gradio/NYC-Airbnb-Open-Data\", split=\"train\")\ndf = dataset.to_pandas()\n\ndef filter_map(min_price, max_price, boroughs):\n new_df = df[(df['neighbourhood_group'].isin(boroughs)) & \n (df['price'] > min_price) & (df['price'] < max_price)]\n names = new_df[\"name\"].tolist()\n prices = new_df[\"price\"].tolist()\n text_list = [(names[i], prices[i]) for i in range(0, len(names))]\n```\n\nIn the code above, we first load the csv data into a pandas dataframe. Let's begin by defining a function that we will use as the prediction function for the gradio app. This function will accept the minimum price and maximum price range as well as the list of boroughs to filter the resulting map. We can use the passed in values (`min_price`, `max_price`, and list of `boroughs`) to filter the dataframe and create `new_df`. Next we will create `text_list` of the names and prices of each Airbnb to use as labels on the map.\n\n## Step 2 - Map Figure \ud83c\udf10\n\nPlotly makes it easy to work with maps. 
Let's take a look below how we can create a map figure.\n\n```python\nimport plotly.graph_objects as go\n\nfig = go.Figure(go.Scattermapbox(\n customdata=text_list,\n lat=new_df['latitude'].tolist(),\n lon=new_df['longitude'].tolist(),\n mode='markers',\n marker=go.scattermapbox.Marker(\n size=6\n ),\n hoverinfo=\"text\",\n hovertemplate='Name: %{customdata[0]}
Price: $%{customdata[1]}'\n ))\n\nfig.update_layout(\n mapbox_style=\"open-street-map\",\n hovermode='closest',\n mapbox=dict(\n bearing=0,\n center=go.layout.mapbox.Center(\n lat=40.67,\n lon=-73.90\n ),\n pitch=0,\n zoom=9\n ),\n)\n```\n\nAbove, we create a scatter plot on mapbox by passing it our list of latitudes and longitudes to plot markers. We also pass in our custom data of names and prices for additional info to appear on every marker we hover over. Next we use `update_layout` to specify other map settings such as zoom, and centering.\n\nMore info [here](https://plotly.com/python/scattermapbox/) on scatter plots using Mapbox and Plotly.\n\n## Step 3 - Gradio App \u26a1\ufe0f\n\nWe will use two `gr.Number` components and a `gr.CheckboxGroup` to allow users of our app to specify price ranges and borough locations. We will then use the `gr.Plot` component as an output for our Plotly + Mapbox map we created earlier.\n\n```python\nwith gr.Blocks() as demo:\n with gr.Column():\n with gr.Row():\n min_price = gr.Number(value=250, label=\"Minimum Price\")\n max_price = gr.Number(value=1000, label=\"Maximum Price\")\n boroughs = gr.CheckboxGroup(choices=[\"Queens\", \"Brooklyn\", \"Manhattan\", \"Bronx\", \"Staten Island\"], value=[\"Queens\", \"Brooklyn\"], label=\"Select Boroughs:\")\n btn = gr.Button(value=\"Update Filter\")\n map = gr.Plot()\n demo.load(filter_map, [min_price, max_price, boroughs], map)\n btn.click(filter_map, [min_price, max_price, boroughs], map)\n```\n\nWe layout these components using the `gr.Column` and `gr.Row` and we'll also add event triggers for when the demo first loads and when our \"Update Filter\" button is clicked in order to trigger the map to update with our new filters.\n\nThis is what the full demo code looks like:\n\n```python\nimport gradio as gr\nimport plotly.graph_objects as go\nfrom datasets import load_dataset\n\ndataset = load_dataset(\"gradio/NYC-Airbnb-Open-Data\", split=\"train\")\ndf = dataset.to_pandas()\n\ndef filter_map(min_price, max_price, boroughs):\n\n filtered_df = df[(df['neighbourhood_group'].isin(boroughs)) & \n (df['price'] > min_price) & (df['price'] < max_price)]\n names = filtered_df[\"name\"].tolist()\n prices = filtered_df[\"price\"].tolist()\n text_list = [(names[i], prices[i]) for i in range(0, len(names))]\n fig = go.Figure(go.Scattermapbox(\n customdata=text_list,\n lat=filtered_df['latitude'].tolist(),\n lon=filtered_df['longitude'].tolist(),\n mode='markers',\n marker=go.scattermapbox.Marker(\n size=6\n ),\n hoverinfo=\"text\",\n hovertemplate='Name: %{customdata[0]}
Price: $%{customdata[1]}'\n ))\n\n fig.update_layout(\n mapbox_style=\"open-street-map\",\n hovermode='closest',\n mapbox=dict(\n bearing=0,\n center=go.layout.mapbox.Center(\n lat=40.67,\n lon=-73.90\n ),\n pitch=0,\n zoom=9\n ),\n )\n\n return fig\n\nwith gr.Blocks() as demo:\n with gr.Column():\n with gr.Row():\n min_price = gr.Number(value=250, label=\"Minimum Price\")\n max_price = gr.Number(value=1000, label=\"Maximum Price\")\n boroughs = gr.CheckboxGroup(choices=[\"Queens\", \"Brooklyn\", \"Manhattan\", \"Bronx\", \"Staten Island\"], value=[\"Queens\", \"Brooklyn\"], label=\"Select Boroughs:\")\n btn = gr.Button(value=\"Update Filter\")\n map = gr.Plot().style()\n demo.load(filter_map, [min_price, max_price, boroughs], map)\n btn.click(filter_map, [min_price, max_price, boroughs], map)\n\ndemo.launch()\n```\n\n## Step 4 - Deployment \ud83e\udd17\n\nIf you run the code above, your app will start running locally.\nYou can even get a temporary shareable link by passing the `share=True` parameter to `launch`.\n\nBut what if you want to a permanent deployment solution?\nLet's deploy our Gradio app to the free HuggingFace Spaces platform.\n\nIf you haven't used Spaces before, follow the previous guide [here](/using_hugging_face_integrations).\n\n## Conclusion \ud83c\udf89\n\nAnd you're all done! That's all the code you need to build a map demo.\n\nHere's a link to the demo [Map demo](https://huggingface.co/spaces/gradio/map_airbnb) and [complete code](https://huggingface.co/spaces/gradio/map_airbnb/blob/main/run.py) (on Hugging Face Spaces)\n", - "html": "

\n", - "tags": ["PLOTS", "MAPS"], - "spaces": [], - "url": "/guides/plot-component-for-maps/", - "contributor": null - } - ], - "preprocessing": "this component does *not* accept input.", - "postprocessing": "expects either a matplotlib.figure.Figure, a plotly.graph_objects._figure.Figure, or a dict corresponding to a bokeh plot (json_item format)", - "parent": "gradio", - "prev_obj": "Number", - "next_obj": "Radio" - }, - "radio": { - "class": null, - "name": "Radio", - "description": "Creates a set of radio buttons of which only one can be selected.
", - "tags": { - "preprocessing": "passes the value of the selected radio button as a {str} or its index as an {int} into the function, depending on `type`.", - "postprocessing": "expects a {str} corresponding to the value of the radio button to be selected.", - "examples-format": "a {str} representing the radio option to select.", - "demos": "sentence_builder, titanic_survival, blocks_essay" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "choices", - "annotation": "list[str] | None", - "doc": "list of options to select from.", - "default": "None" - }, - { - "name": "value", - "annotation": "str | Callable | None", - "doc": "the button selected by default. If None, no button is selected by default. If callable, the function will be called whenever the app loads to set the initial value of the component.", - "default": "None" - }, - { - "name": "type", - "annotation": "str", - "doc": "Type of value to be returned by component. \"value\" returns the string of the choice selected, \"index\" returns the index of the choice selected.", - "default": "\"value\"" - }, - { - "name": "label", - "annotation": "str | None", - "doc": "component name in interface.", - "default": "None" - }, - { - "name": "info", - "annotation": "str | None", - "doc": "additional component description.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", - "default": "None" - }, - { - "name": "show_label", - "annotation": "bool | None", - "doc": "if True, will display label.", - "default": "None" - }, - { - "name": "container", - "annotation": "bool", - "doc": "If True, will place the component in a container - providing some extra padding around the border.", - "default": "True" - }, - { - "name": "scale", - "annotation": "int | None", - "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", - "default": "None" - }, - { - "name": "min_width", - "annotation": "int", - "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", - "default": "160" - }, - { - "name": "interactive", - "annotation": "bool | None", - "doc": "if True, choices in this radio group will be selectable; if False, selection will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.", - "default": "None" - }, - { - "name": "visible", - "annotation": "bool", - "doc": "If False, component will be hidden.", - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "elem_classes", - "annotation": "list[str] | str | None", - "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. 
Can be used for targeting CSS styles.", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [ - { - "fn": null, - "name": "change", - "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Radio" - }, - { - "fn": null, - "name": "input", - "description": "This listener is triggered when the user changes the value of the component. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. 
If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Radio" - }, - { - "fn": null, - "name": "select", - "description": "Event listener for when the user selects Radio option. Uses event data gradio.SelectData to carry `value` referring to label of selected option, and `index` to refer to index. See EventData documentation on how to use this event data.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Radio" - } - ], - "string_shortcuts": [["Radio", "radio", "Uses default values"]], - "demos": [ - [ - "sentence_builder", - "import gradio as gr\n\n\ndef sentence_builder(quantity, animal, countries, place, activity_list, morning):\n return f\"\"\"The {quantity} {animal}s from {\" and \".join(countries)} went to the {place} where they {\" and \".join(activity_list)} until the {\"morning\" if morning else \"night\"}\"\"\"\n\n\ndemo = gr.Interface(\n sentence_builder,\n [\n gr.Slider(2, 20, value=4, label=\"Count\", info=\"Choose between 2 and 20\"),\n gr.Dropdown(\n [\"cat\", \"dog\", \"bird\"], label=\"Animal\", info=\"Will add more animals later!\"\n ),\n gr.CheckboxGroup([\"USA\", \"Japan\", \"Pakistan\"], label=\"Countries\", info=\"Where are they from?\"),\n gr.Radio([\"park\", \"zoo\", \"road\"], label=\"Location\", info=\"Where did they go?\"),\n gr.Dropdown(\n [\"ran\", \"swam\", \"ate\", \"slept\"], value=[\"swam\", \"slept\"], multiselect=True, label=\"Activity\", info=\"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed auctor, nisl eget ultricies aliquam, nunc nisl aliquet nunc, eget aliquam nisl nunc vel nisl.\"\n ),\n gr.Checkbox(label=\"Morning\", info=\"Did they do it in the morning?\"),\n ],\n \"text\",\n examples=[\n [2, \"cat\", [\"Japan\", \"Pakistan\"], \"park\", [\"ate\", \"swam\"], True],\n [4, \"dog\", [\"Japan\"], \"zoo\", [\"ate\", \"swam\"], False],\n [10, \"bird\", [\"USA\", \"Pakistan\"], \"road\", [\"ran\"], False],\n [8, \"cat\", [\"Pakistan\"], \"zoo\", [\"ate\"], True],\n ]\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ], - [ - "titanic_survival", - "import os\n\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\n\nimport gradio as gr\n\ncurrent_dir = os.path.dirname(os.path.realpath(__file__))\ndata = pd.read_csv(os.path.join(current_dir, \"files/titanic.csv\"))\n\n\ndef encode_age(df):\n df.Age = df.Age.fillna(-0.5)\n bins = (-1, 0, 5, 12, 18, 25, 35, 60, 120)\n categories = pd.cut(df.Age, bins, labels=False)\n df.Age = categories\n return df\n\n\ndef encode_fare(df):\n df.Fare = df.Fare.fillna(-0.5)\n bins = (-1, 0, 8, 15, 31, 1000)\n categories = pd.cut(df.Fare, bins, labels=False)\n df.Fare = categories\n return df\n\n\ndef encode_df(df):\n df = encode_age(df)\n df = encode_fare(df)\n sex_mapping = {\"male\": 0, \"female\": 1}\n df = df.replace({\"Sex\": sex_mapping})\n embark_mapping = {\"S\": 1, \"C\": 2, \"Q\": 3}\n df = df.replace({\"Embarked\": embark_mapping})\n df.Embarked = df.Embarked.fillna(0)\n df[\"Company\"] = 0\n df.loc[(df[\"SibSp\"] > 0), \"Company\"] = 1\n df.loc[(df[\"Parch\"] > 0), \"Company\"] = 2\n df.loc[(df[\"SibSp\"] > 0) & (df[\"Parch\"] > 0), \"Company\"] = 3\n df = df[\n [\n \"PassengerId\",\n \"Pclass\",\n \"Sex\",\n \"Age\",\n \"Fare\",\n \"Embarked\",\n \"Company\",\n \"Survived\",\n ]\n ]\n return df\n\n\ntrain = encode_df(data)\n\nX_all = train.drop([\"Survived\", \"PassengerId\"], axis=1)\ny_all = train[\"Survived\"]\n\nnum_test = 0.20\nX_train, X_test, y_train, y_test = train_test_split(\n X_all, y_all, test_size=num_test, random_state=23\n)\n\nclf = RandomForestClassifier()\nclf.fit(X_train, y_train)\npredictions = clf.predict(X_test)\n\n\ndef predict_survival(passenger_class, is_male, age, company, fare, embark_point):\n if passenger_class is None or embark_point is None:\n return 
None\n df = pd.DataFrame.from_dict(\n {\n \"Pclass\": [passenger_class + 1],\n \"Sex\": [0 if is_male else 1],\n \"Age\": [age],\n \"Fare\": [fare],\n \"Embarked\": [embark_point + 1],\n \"Company\": [\n (1 if \"Sibling\" in company else 0) + (2 if \"Child\" in company else 0)\n ]\n }\n )\n df = encode_age(df)\n df = encode_fare(df)\n pred = clf.predict_proba(df)[0]\n return {\"Perishes\": float(pred[0]), \"Survives\": float(pred[1])}\n\n\ndemo = gr.Interface(\n predict_survival,\n [\n gr.Dropdown([\"first\", \"second\", \"third\"], type=\"index\"),\n \"checkbox\",\n gr.Slider(0, 80, value=25),\n gr.CheckboxGroup([\"Sibling\", \"Child\"], label=\"Travelling with (select all)\"),\n gr.Number(value=20),\n gr.Radio([\"S\", \"C\", \"Q\"], type=\"index\"),\n ],\n \"label\",\n examples=[\n [\"first\", True, 30, [], 50, \"S\"],\n [\"second\", False, 40, [\"Sibling\", \"Child\"], 10, \"Q\"],\n [\"third\", True, 30, [\"Child\"], 20, \"S\"],\n ],\n interpretation=\"default\",\n live=True,\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ], - [ - "blocks_essay", - "import gradio as gr\n\n\ndef change_textbox(choice):\n if choice == \"short\":\n return gr.Textbox.update(lines=2, visible=True)\n elif choice == \"long\":\n return gr.Textbox.update(lines=8, visible=True)\n else:\n return gr.Textbox.update(visible=False)\n\n\nwith gr.Blocks() as demo:\n radio = gr.Radio(\n [\"short\", \"long\", \"none\"], label=\"What kind of essay would you like to write?\"\n )\n text = gr.Textbox(lines=2, interactive=True).style(show_copy_button=True)\n\n radio.change(fn=change_textbox, inputs=radio, outputs=text)\n\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ] - ], - "preprocessing": "passes the value of the selected radio button as a str or its index as an int into the function, depending on `type`.", - "postprocessing": "expects a str corresponding to the value of the radio button to be selected.", - "examples-format": "a str representing the radio option to select.", - "parent": "gradio", - "prev_obj": "Plot", - "next_obj": "ScatterPlot" - }, - "scatterplot": { - "class": null, - "name": "ScatterPlot", - "description": "Create a scatter plot.

", - "tags": { - "preprocessing": "this component does *not* accept input.", - "postprocessing": "expects a pandas dataframe with the data to plot.", - "demos": "scatter_plot", - "guides": "creating-a-dashboard-from-bigquery-data" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "value", - "annotation": "pd.DataFrame | Callable | None", - "doc": "The pandas dataframe containing the data to display in a scatter plot, or a callable. If callable, the function will be called whenever the app loads to set the initial value of the component.", - "default": "None" - }, - { - "name": "x", - "annotation": "str | None", - "doc": "Column corresponding to the x axis.", - "default": "None" - }, - { - "name": "y", - "annotation": "str | None", - "doc": "Column corresponding to the y axis.", - "default": "None" - }, - { - "name": "color", - "annotation": "str | None", - "doc": "The column to determine the point color. If the column contains numeric data, gradio will interpolate the column data so that small values correspond to light colors and large values correspond to dark values.", - "default": "None" - }, - { - "name": "size", - "annotation": "str | None", - "doc": "The column used to determine the point size. Should contain numeric data so that gradio can map the data to the point size.", - "default": "None" - }, - { - "name": "shape", - "annotation": "str | None", - "doc": "The column used to determine the point shape. Should contain categorical data. Gradio will map each unique value to a different shape.", - "default": "None" - }, - { - "name": "title", - "annotation": "str | None", - "doc": "The title to display on top of the chart.", - "default": "None" - }, - { - "name": "tooltip", - "annotation": "list[str] | str | None", - "doc": "The column (or list of columns) to display on the tooltip when a user hovers a point on the plot.", - "default": "None" - }, - { - "name": "x_title", - "annotation": "str | None", - "doc": "The title given to the x axis. By default, uses the value of the x parameter.", - "default": "None" - }, - { - "name": "y_title", - "annotation": "str | None", - "doc": "The title given to the y axis. By default, uses the value of the y parameter.", - "default": "None" - }, - { - "name": "color_legend_title", - "annotation": "str | None", - "doc": "The title given to the color legend. By default, uses the value of color parameter.", - "default": "None" - }, - { - "name": "size_legend_title", - "annotation": "str | None", - "doc": "The title given to the size legend. By default, uses the value of the size parameter.", - "default": "None" - }, - { - "name": "shape_legend_title", - "annotation": "str | None", - "doc": "The title given to the shape legend. By default, uses the value of the shape parameter.", - "default": "None" - }, - { - "name": "color_legend_position", - "annotation": "Literal['left', 'right', 'top', 'bottom', 'top-left', 'top-right', 'bottom-left', 'bottom-right', 'none'] | None", - "doc": "The position of the color legend. If the string value 'none' is passed, this legend is omitted. For other valid position values see: https://vega.github.io/vega/docs/legends/#orientation.", - "default": "None" - }, - { - "name": "size_legend_position", - "annotation": "Literal['left', 'right', 'top', 'bottom', 'top-left', 'top-right', 'bottom-left', 'bottom-right', 'none'] | None", - "doc": "The position of the size legend. If the string value 'none' is passed, this legend is omitted. 
For other valid position values see: https://vega.github.io/vega/docs/legends/#orientation.", - "default": "None" - }, - { - "name": "shape_legend_position", - "annotation": "Literal['left', 'right', 'top', 'bottom', 'top-left', 'top-right', 'bottom-left', 'bottom-right', 'none'] | None", - "doc": "The position of the shape legend. If the string value 'none' is passed, this legend is omitted. For other valid position values see: https://vega.github.io/vega/docs/legends/#orientation.", - "default": "None" - }, - { - "name": "height", - "annotation": "int | None", - "doc": "The height of the plot in pixels.", - "default": "None" - }, - { - "name": "width", - "annotation": "int | None", - "doc": "The width of the plot in pixels.", - "default": "None" - }, - { - "name": "x_lim", - "annotation": "list[int | float] | None", - "doc": "A tuple or list containing the limits for the x-axis, specified as [x_min, x_max].", - "default": "None" - }, - { - "name": "y_lim", - "annotation": "list[int | float] | None", - "doc": "A tuple of list containing the limits for the y-axis, specified as [y_min, y_max].", - "default": "None" - }, - { - "name": "caption", - "annotation": "str | None", - "doc": "The (optional) caption to display below the plot.", - "default": "None" - }, - { - "name": "interactive", - "annotation": "bool | None", - "doc": "Whether users should be able to interact with the plot by panning or zooming with their mouse or trackpad.", - "default": "True" - }, - { - "name": "label", - "annotation": "str | None", - "doc": "The (optional) label to display on the top left corner of the plot.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": " If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", - "default": "None" - }, - { - "name": "show_label", - "annotation": "bool | None", - "doc": "Whether the label should be displayed.", - "default": "None" - }, - { - "name": "container", - "annotation": "bool", - "doc": null, - "default": "True" - }, - { - "name": "scale", - "annotation": "int | None", - "doc": null, - "default": "None" - }, - { - "name": "min_width", - "annotation": "int", - "doc": null, - "default": "160" - }, - { - "name": "visible", - "annotation": "bool", - "doc": "Whether the plot should be visible.", - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "elem_classes", - "annotation": "list[str] | str | None", - "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [ - { - "fn": null, - "name": "change", - "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. 
This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. 
For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.ScatterPlot" - }, - { - "fn": null, - "name": "clear", - "description": "This listener is triggered when the user clears the component (e.g. image or audio) using the X button for the component. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.ScatterPlot" - } - ], - "string_shortcuts": [ - ["ScatterPlot", "scatterplot", "Uses default values"] - ], - "demos": [ - [ - "scatter_plot", - "import gradio as gr\nfrom vega_datasets import data\n\ncars = data.cars()\niris = data.iris()\n\n# # Or generate your own fake data\n\n# import pandas as pd\n# import random\n\n# cars_data = {\n# \"Name\": [\"car name \" + f\" {int(i/10)}\" for i in range(400)],\n# \"Miles_per_Gallon\": [random.randint(10, 30) for _ in range(400)],\n# \"Origin\": [random.choice([\"USA\", \"Europe\", \"Japan\"]) for _ in range(400)],\n# \"Horsepower\": [random.randint(50, 250) for _ in range(400)],\n# }\n\n# iris_data = {\n# \"petalWidth\": [round(random.uniform(0, 2.5), 2) for _ in range(150)],\n# \"petalLength\": [round(random.uniform(0, 7), 2) for _ in range(150)],\n# \"species\": [\n# random.choice([\"setosa\", \"versicolor\", \"virginica\"]) for _ in range(150)\n# ],\n# }\n\n# cars = pd.DataFrame(cars_data)\n# iris = pd.DataFrame(iris_data)\n\n\ndef scatter_plot_fn(dataset):\n if dataset == \"iris\":\n return gr.ScatterPlot.update(\n value=iris,\n x=\"petalWidth\",\n y=\"petalLength\",\n color=\"species\",\n title=\"Iris Dataset\",\n color_legend_title=\"Species\",\n x_title=\"Petal Width\",\n y_title=\"Petal Length\",\n tooltip=[\"petalWidth\", \"petalLength\", \"species\"],\n caption=\"\",\n )\n else:\n return gr.ScatterPlot.update(\n value=cars,\n x=\"Horsepower\",\n y=\"Miles_per_Gallon\",\n color=\"Origin\",\n tooltip=\"Name\",\n title=\"Car Data\",\n y_title=\"Miles per Gallon\",\n color_legend_title=\"Origin of Car\",\n caption=\"MPG vs Horsepower of various cars\",\n )\n\n\nwith gr.Blocks() as scatter_plot:\n with gr.Row():\n with gr.Column():\n dataset = gr.Dropdown(choices=[\"cars\", \"iris\"], value=\"cars\")\n with gr.Column():\n plot = gr.ScatterPlot()\n dataset.change(scatter_plot_fn, inputs=dataset, outputs=plot)\n 
scatter_plot.load(fn=scatter_plot_fn, inputs=dataset, outputs=plot)\n\nif __name__ == \"__main__\":\n scatter_plot.launch()\n" - ] - ], - "guides": [ - { - "name": "creating-a-dashboard-from-bigquery-data", - "category": "tabular-data-science-and-plots", - "pretty_category": "Tabular Data Science And Plots", - "guide_index": null, - "absolute_index": 24, - "pretty_name": "Creating A Dashboard From Bigquery Data", - "content": "# Creating a Real-Time Dashboard from BigQuery Data\n\n\n\n\n[Google BigQuery](https://cloud.google.com/bigquery) is a cloud-based service for processing very large data sets. It is a serverless and highly scalable data warehousing solution that enables users to analyze data [using SQL-like queries](https://www.oreilly.com/library/view/google-bigquery-the/9781492044451/ch01.html).\n\nIn this tutorial, we will show you how to query a BigQuery dataset in Python and display the data in a dashboard that updates in real time using `gradio`. The dashboard will look like this:\n\n\n\nWe'll cover the following steps in this Guide:\n\n1. Setting up your BigQuery credentials\n2. Using the BigQuery client\n3. Building the real-time dashboard (in just *7 lines of Python*)\n\nWe'll be working with the [New York Times' COVID dataset](https://www.nytimes.com/interactive/2021/us/covid-cases.html) that is available as a public dataset on BigQuery. The dataset, named `covid19_nyt.us_counties` contains the latest information about the number of confirmed cases and deaths from COVID across US counties. \n\n**Prerequisites**: This Guide uses [Gradio Blocks](/guides/quickstart/#blocks-more-flexibility-and-control), so make your are familiar with the Blocks class. \n\n## Setting up your BigQuery Credentials\n\nTo use Gradio with BigQuery, you will need to obtain your BigQuery credentials and use them with the [BigQuery Python client](https://pypi.org/project/google-cloud-bigquery/). If you already have BigQuery credentials (as a `.json` file), you can skip this section. If not, you can do this for free in just a couple of minutes.\n\n1. First, log in to your Google Cloud account and go to the Google Cloud Console (https://console.cloud.google.com/)\n\n2. In the Cloud Console, click on the hamburger menu in the top-left corner and select \"APIs & Services\" from the menu. If you do not have an existing project, you will need to create one.\n\n3. Then, click the \"+ Enabled APIs & services\" button, which allows you to enable specific services for your project. Search for \"BigQuery API\", click on it, and click the \"Enable\" button. If you see the \"Manage\" button, then the BigQuery is already enabled, and you're all set. \n\n4. In the APIs & Services menu, click on the \"Credentials\" tab and then click on the \"Create credentials\" button.\n\n5. In the \"Create credentials\" dialog, select \"Service account key\" as the type of credentials to create, and give it a name. Also grant the service account permissions by giving it a role such as \"BigQuery User\", which will allow you to run queries.\n\n6. After selecting the service account, select the \"JSON\" key type and then click on the \"Create\" button. This will download the JSON key file containing your credentials to your computer. 
It will look something like this:\n\n```json\n{\n \"type\": \"service_account\",\n \"project_id\": \"your project\",\n \"private_key_id\": \"your private key id\",\n \"private_key\": \"private key\",\n \"client_email\": \"email\",\n \"client_id\": \"client id\",\n \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n \"token_uri\": \"https://accounts.google.com/o/oauth2/token\",\n \"auth_provider_x509_cert_url\": \"https://www.googleapis.com/oauth2/v1/certs\",\n \"client_x509_cert_url\": \"https://www.googleapis.com/robot/v1/metadata/x509/email_id\"\n}\n```\n\n## Using the BigQuery Client\n\nOnce you have the credentials, you will need to use the BigQuery Python client to authenticate using your credentials. To do this, you will need to install the BigQuery Python client by running the following command in the terminal:\n\n```bash\npip install google-cloud-bigquery[pandas]\n```\n\nYou'll notice that we've installed the pandas add-on, which will be helpful for processing the BigQuery dataset as a pandas dataframe. Once the client is installed, you can authenticate using your credentials by running the following code:\n\n```py\nfrom google.cloud import bigquery\n\nclient = bigquery.Client.from_service_account_json(\"path/to/key.json\")\n```\n\nWith your credentials authenticated, you can now use the BigQuery Python client to interact with your BigQuery datasets. \n\nHere is an example of a function which queries the `covid19_nyt.us_counties` dataset in BigQuery to show the top 20 counties with the most confirmed cases as of the current day:\n\n```py\nimport numpy as np\n\nQUERY = (\n 'SELECT * FROM `bigquery-public-data.covid19_nyt.us_counties` ' \n 'ORDER BY date DESC,confirmed_cases DESC '\n 'LIMIT 20')\n\ndef run_query():\n query_job = client.query(QUERY) \n query_result = query_job.result() \n df = query_result.to_dataframe()\n # Select a subset of columns \n df = df[[\"confirmed_cases\", \"deaths\", \"county\", \"state_name\"]]\n # Convert numeric columns to standard numpy types\n df = df.astype({\"deaths\": np.int64, \"confirmed_cases\": np.int64})\n return df\n```\n\n## Building the Real-Time Dashboard\n\nOnce you have a function to query the data, you can use the `gr.DataFrame` component from the Gradio library to display the results in a tabular format. This is a useful way to inspect the data and make sure that it has been queried correctly.\n\nHere is an example of how to use the `gr.DataFrame` component to display the results. By passing in the `run_query` function to `gr.DataFrame`, we instruct Gradio to run the function as soon as the page loads and show the results. In addition, you also pass in the keyword `every` to tell the dashboard to refresh every hour (60*60 seconds).\n\n```py\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n gr.DataFrame(run_query, every=60*60)\n\ndemo.queue().launch() # Run the demo using queuing\n```\n\nPerhaps you'd like to add a visualization to our dashboard. You can use the `gr.ScatterPlot()` component to visualize the data in a scatter plot. This allows you to see the relationship between different variables such as case count and case deaths in the dataset and can be useful for exploring the data and gaining insights. Again, we can do this in real-time\nby passing in the `every` parameter. 
\n\nHere is a complete example showing how to use the `gr.ScatterPlot` to visualize in addition to displaying data with the `gr.DataFrame`\n\n```py\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n gr.Markdown(\"# \ud83d\udc89 Covid Dashboard (Updated Hourly)\")\n with gr.Row():\n gr.DataFrame(run_query, every=60*60)\n gr.ScatterPlot(run_query, every=60*60, x=\"confirmed_cases\", \n y=\"deaths\", tooltip=\"county\", width=500, height=500)\n\ndemo.queue().launch() # Run the demo with queuing enabled\n```", - "html": "

Creating a Real-Time Dashboard from BigQuery Data

\n\n

Google BigQuery is a cloud-based service for processing very large data sets. It is a serverless and highly scalable data warehousing solution that enables users to analyze data using SQL-like queries.

\n\n

In this tutorial, we will show you how to query a BigQuery dataset in Python and display the data in a dashboard that updates in real time using gradio. The dashboard will look like this:

\n\n

\n\n

We'll cover the following steps in this Guide:

\n\n
  1. Setting up your BigQuery credentials
  2. Using the BigQuery client
  3. Building the real-time dashboard (in just 7 lines of Python)
\n\n

We'll be working with the New York Times' COVID dataset that is available as a public dataset on BigQuery. The dataset, named covid19_nyt.us_counties, contains the latest information about the number of confirmed cases and deaths from COVID across US counties.

\n\n

Prerequisites: This Guide uses Gradio Blocks, so make sure you are familiar with the Blocks class.

\n\n

Setting up your BigQuery Credentials

\n\n

To use Gradio with BigQuery, you will need to obtain your BigQuery credentials and use them with the BigQuery Python client. If you already have BigQuery credentials (as a .json file), you can skip this section. If not, you can do this for free in just a couple of minutes.

\n\n
  1. First, log in to your Google Cloud account and go to the Google Cloud Console (https://console.cloud.google.com/).
  2. In the Cloud Console, click on the hamburger menu in the top-left corner and select \"APIs & Services\" from the menu. If you do not have an existing project, you will need to create one.
  3. Then, click the \"+ Enabled APIs & services\" button, which allows you to enable specific services for your project. Search for \"BigQuery API\", click on it, and click the \"Enable\" button. If you see the \"Manage\" button, then the BigQuery API is already enabled, and you're all set.
  4. In the APIs & Services menu, click on the \"Credentials\" tab and then click on the \"Create credentials\" button.
  5. In the \"Create credentials\" dialog, select \"Service account key\" as the type of credentials to create, and give it a name. Also grant the service account permissions by giving it a role such as \"BigQuery User\", which will allow you to run queries.
  6. After selecting the service account, select the \"JSON\" key type and then click on the \"Create\" button. This will download the JSON key file containing your credentials to your computer. It will look something like this:
\n\n
{\n \"type\": \"service_account\",\n \"project_id\": \"your project\",\n \"private_key_id\": \"your private key id\",\n \"private_key\": \"private key\",\n \"client_email\": \"email\",\n \"client_id\": \"client id\",\n \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n \"token_uri\": \"https://accounts.google.com/o/oauth2/token\",\n \"auth_provider_x509_cert_url\": \"https://www.googleapis.com/oauth2/v1/certs\",\n \"client_x509_cert_url\":  \"https://www.googleapis.com/robot/v1/metadata/x509/email_id\"\n}\n
\n\n

Using the BigQuery Client

\n\n

Once you have the credentials, you will need to use the BigQuery Python client to authenticate using your credentials. To do this, you will need to install the BigQuery Python client by running the following command in the terminal:

\n\n
pip install google-cloud-bigquery[pandas]\n
\n\n

You'll notice that we've installed the pandas add-on, which will be helpful for processing the BigQuery dataset as a pandas dataframe. Once the client is installed, you can authenticate using your credentials by running the following code:

\n\n
from google.cloud import bigquery\n\nclient = bigquery.Client.from_service_account_json(\"path/to/key.json\")\n
\n\n

With your credentials authenticated, you can now use the BigQuery Python client to interact with your BigQuery datasets.

\n\n

Here is an example of a function which queries the covid19_nyt.us_counties dataset in BigQuery to show the top 20 counties with the most confirmed cases as of the current day:

\n\n
import numpy as np\n\nQUERY = (\n    'SELECT * FROM `bigquery-public-data.covid19_nyt.us_counties` ' \n    'ORDER BY date DESC,confirmed_cases DESC '\n    'LIMIT 20')\n\ndef run_query():\n    query_job = client.query(QUERY)  \n    query_result = query_job.result()  \n    df = query_result.to_dataframe()\n    # Select a subset of columns \n    df = df[[\"confirmed_cases\", \"deaths\", \"county\", \"state_name\"]]\n    # Convert numeric columns to standard numpy types\n    df = df.astype({\"deaths\": np.int64, \"confirmed_cases\": np.int64})\n    return df\n
\n\n

Building the Real-Time Dashboard

\n\n

Once you have a function to query the data, you can use the gr.DataFrame component from the Gradio library to display the results in a tabular format. This is a useful way to inspect the data and make sure that it has been queried correctly.

\n\n

Here is an example of how to use the gr.DataFrame component to display the results. By passing in the run_query function to gr.DataFrame, we instruct Gradio to run the function as soon as the page loads and show the results. In addition, we also pass in the keyword every to tell the dashboard to refresh every hour (60*60 seconds).

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    gr.DataFrame(run_query, every=60*60)\n\ndemo.queue().launch()  # Run the demo using queuing\n
\n\n

Perhaps you'd like to add a visualization to our dashboard. You can use the gr.ScatterPlot() component to visualize the data in a scatter plot. This allows you to see the relationship between different variables such as case count and case deaths in the dataset and can be useful for exploring the data and gaining insights. Again, we can do this in real-time\nby passing in the every parameter.

\n\n

Here is a complete example showing how to use the gr.ScatterPlot to visualize the data, in addition to displaying it with the gr.DataFrame:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    gr.Markdown(\"# \ud83d\udc89 Covid Dashboard (Updated Hourly)\")\n    with gr.Row():\n        gr.DataFrame(run_query, every=60*60)\n        gr.ScatterPlot(run_query, every=60*60, x=\"confirmed_cases\", \n                        y=\"deaths\", tooltip=\"county\", width=500, height=500)\n\ndemo.queue().launch()  # Run the demo with queuing enabled\n
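The listener reference above also documents a `cancels` parameter, and (per the `every` parameter docs) the polling event created by `every` is exposed on the component's `.load_event` attribute. As a minimal, untested sketch, assuming the `run_query` function from this guide and a hypothetical "Pause" button, the hourly refresh could be stopped like this:

```python
import gradio as gr

with gr.Blocks() as demo:
    df = gr.DataFrame(run_query, every=60*60)   # re-runs run_query every hour
    pause = gr.Button("Pause hourly refresh")
    # Clicking the button cancels the polling event; fn=None means the click
    # does nothing except cancel the events listed in `cancels`.
    pause.click(fn=None, inputs=None, outputs=None, cancels=[df.load_event])

demo.queue().launch()
```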
\n", - "tags": ["TABULAR", "DASHBOARD", "PLOTS "], - "spaces": [], - "url": "/guides/creating-a-dashboard-from-bigquery-data/", - "contributor": null - } - ], - "preprocessing": "this component does *not* accept input.", - "postprocessing": "expects a pandas dataframe with the data to plot.", - "parent": "gradio", - "prev_obj": "Radio", - "next_obj": "Slider" - }, - "slider": { - "class": null, - "name": "Slider", - "description": "Creates a slider that ranges from `minimum` to `maximum` with a step size of `step`.
", - "tags": { - "preprocessing": "passes slider value as a {float} into the function.", - "postprocessing": "expects an {int} or {float} returned from function and sets slider value to it as long as it is within range.", - "examples-format": "A {float} or {int} representing the slider's value.", - "demos": "sentence_builder, slider_release, generate_tone, titanic_survival, interface_random_slider, blocks_random_slider", - "guides": "create-your-own-friends-with-a-gan" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "minimum", - "annotation": "float", - "doc": "minimum value for slider.", - "default": "0" - }, - { - "name": "maximum", - "annotation": "float", - "doc": "maximum value for slider.", - "default": "100" - }, - { - "name": "value", - "annotation": "float | Callable | None", - "doc": "default value. If callable, the function will be called whenever the app loads to set the initial value of the component. Ignored if randomized=True.", - "default": "None" - }, - { - "name": "step", - "annotation": "float | None", - "doc": "increment between slider values.", - "default": "None" - }, - { - "name": "label", - "annotation": "str | None", - "doc": "component name in interface.", - "default": "None" - }, - { - "name": "info", - "annotation": "str | None", - "doc": "additional component description.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", - "default": "None" - }, - { - "name": "show_label", - "annotation": "bool | None", - "doc": "if True, will display label.", - "default": "None" - }, - { - "name": "container", - "annotation": "bool", - "doc": "If True, will place the component in a container - providing some extra padding around the border.", - "default": "True" - }, - { - "name": "scale", - "annotation": "int | None", - "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", - "default": "None" - }, - { - "name": "min_width", - "annotation": "int", - "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", - "default": "160" - }, - { - "name": "interactive", - "annotation": "bool | None", - "doc": "if True, slider will be adjustable; if False, adjusting will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.", - "default": "None" - }, - { - "name": "visible", - "annotation": "bool", - "doc": "If False, component will be hidden.", - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "elem_classes", - "annotation": "list[str] | str | None", - "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. 
Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "randomize", - "annotation": "bool", - "doc": "If True, the value of the slider when the app loads is taken uniformly at random from the range given by the minimum and maximum.", - "default": "False" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [ - { - "fn": null, - "name": "change", - "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Slider" - }, - { - "fn": null, - "name": "input", - "description": "This listener is triggered when the user changes the value of the component. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Slider" - }, - { - "fn": null, - "name": "release", - "description": "This listener is triggered when the user releases the mouse on this component (e.g. when the user releases the slider). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." 
- }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Slider" - } - ], - "string_shortcuts": [["Slider", "slider", "Uses default values"]], - "demos": [ - [ - "sentence_builder", - "import gradio as gr\n\n\ndef sentence_builder(quantity, animal, countries, place, activity_list, morning):\n return f\"\"\"The {quantity} {animal}s from {\" and \".join(countries)} went to the {place} where they {\" and \".join(activity_list)} until the {\"morning\" if morning else \"night\"}\"\"\"\n\n\ndemo = gr.Interface(\n sentence_builder,\n [\n gr.Slider(2, 20, value=4, label=\"Count\", info=\"Choose between 2 and 20\"),\n gr.Dropdown(\n [\"cat\", \"dog\", \"bird\"], label=\"Animal\", info=\"Will add more animals later!\"\n ),\n gr.CheckboxGroup([\"USA\", \"Japan\", \"Pakistan\"], label=\"Countries\", info=\"Where are they from?\"),\n gr.Radio([\"park\", \"zoo\", \"road\"], label=\"Location\", info=\"Where did they go?\"),\n gr.Dropdown(\n [\"ran\", \"swam\", \"ate\", \"slept\"], value=[\"swam\", \"slept\"], multiselect=True, label=\"Activity\", info=\"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed auctor, nisl eget ultricies aliquam, nunc nisl aliquet nunc, eget aliquam nisl nunc vel nisl.\"\n ),\n gr.Checkbox(label=\"Morning\", info=\"Did they do it in the morning?\"),\n ],\n \"text\",\n examples=[\n [2, \"cat\", [\"Japan\", \"Pakistan\"], \"park\", [\"ate\", \"swam\"], True],\n [4, \"dog\", [\"Japan\"], \"zoo\", [\"ate\", \"swam\"], False],\n [10, \"bird\", [\"USA\", \"Pakistan\"], \"road\", [\"ran\"], False],\n [8, \"cat\", [\"Pakistan\"], \"zoo\", [\"ate\"], True],\n ]\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ], - [ - "slider_release", - "import gradio as gr\n\n\ndef identity(x, state):\n state += 1\n return x, state, state\n\n\nwith gr.Blocks() as demo:\n slider = gr.Slider(0, 100, step=0.1)\n state = gr.State(value=0)\n with gr.Row():\n number = gr.Number(label=\"On release\")\n number2 = gr.Number(label=\"Number of events fired\")\n slider.release(identity, inputs=[slider, state], outputs=[number, state, number2], api_name=\"predict\")\n\nif __name__ == \"__main__\":\n print(\"here\")\n demo.launch()\n print(demo.server_port)\n" - ], - [ - "generate_tone", - "import numpy as np\nimport gradio as gr\n\nnotes = [\"C\", \"C#\", \"D\", \"D#\", \"E\", \"F\", \"F#\", \"G\", \"G#\", \"A\", \"A#\", \"B\"]\n\ndef generate_tone(note, octave, duration):\n sr = 48000\n a4_freq, tones_from_a4 = 440, 12 * (octave - 4) + (note - 9)\n frequency = a4_freq * 2 ** (tones_from_a4 / 12)\n duration = int(duration)\n audio = np.linspace(0, duration, duration * sr)\n audio = (20000 * np.sin(audio * (2 * np.pi * frequency))).astype(np.int16)\n return sr, audio\n\ndemo = gr.Interface(\n generate_tone,\n [\n gr.Dropdown(notes, type=\"index\"),\n gr.Slider(4, 6, step=1),\n gr.Textbox(value=1, label=\"Duration in seconds\"),\n ],\n \"audio\",\n)\nif __name__ == \"__main__\":\n demo.launch()\n" - ], - [ - "titanic_survival", - "import os\n\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\n\nimport gradio as gr\n\ncurrent_dir = os.path.dirname(os.path.realpath(__file__))\ndata = pd.read_csv(os.path.join(current_dir, \"files/titanic.csv\"))\n\n\ndef encode_age(df):\n df.Age = df.Age.fillna(-0.5)\n bins = (-1, 0, 5, 12, 18, 25, 35, 60, 120)\n categories = pd.cut(df.Age, bins, labels=False)\n df.Age = categories\n return df\n\n\ndef 
encode_fare(df):\n df.Fare = df.Fare.fillna(-0.5)\n bins = (-1, 0, 8, 15, 31, 1000)\n categories = pd.cut(df.Fare, bins, labels=False)\n df.Fare = categories\n return df\n\n\ndef encode_df(df):\n df = encode_age(df)\n df = encode_fare(df)\n sex_mapping = {\"male\": 0, \"female\": 1}\n df = df.replace({\"Sex\": sex_mapping})\n embark_mapping = {\"S\": 1, \"C\": 2, \"Q\": 3}\n df = df.replace({\"Embarked\": embark_mapping})\n df.Embarked = df.Embarked.fillna(0)\n df[\"Company\"] = 0\n df.loc[(df[\"SibSp\"] > 0), \"Company\"] = 1\n df.loc[(df[\"Parch\"] > 0), \"Company\"] = 2\n df.loc[(df[\"SibSp\"] > 0) & (df[\"Parch\"] > 0), \"Company\"] = 3\n df = df[\n [\n \"PassengerId\",\n \"Pclass\",\n \"Sex\",\n \"Age\",\n \"Fare\",\n \"Embarked\",\n \"Company\",\n \"Survived\",\n ]\n ]\n return df\n\n\ntrain = encode_df(data)\n\nX_all = train.drop([\"Survived\", \"PassengerId\"], axis=1)\ny_all = train[\"Survived\"]\n\nnum_test = 0.20\nX_train, X_test, y_train, y_test = train_test_split(\n X_all, y_all, test_size=num_test, random_state=23\n)\n\nclf = RandomForestClassifier()\nclf.fit(X_train, y_train)\npredictions = clf.predict(X_test)\n\n\ndef predict_survival(passenger_class, is_male, age, company, fare, embark_point):\n if passenger_class is None or embark_point is None:\n return None\n df = pd.DataFrame.from_dict(\n {\n \"Pclass\": [passenger_class + 1],\n \"Sex\": [0 if is_male else 1],\n \"Age\": [age],\n \"Fare\": [fare],\n \"Embarked\": [embark_point + 1],\n \"Company\": [\n (1 if \"Sibling\" in company else 0) + (2 if \"Child\" in company else 0)\n ]\n }\n )\n df = encode_age(df)\n df = encode_fare(df)\n pred = clf.predict_proba(df)[0]\n return {\"Perishes\": float(pred[0]), \"Survives\": float(pred[1])}\n\n\ndemo = gr.Interface(\n predict_survival,\n [\n gr.Dropdown([\"first\", \"second\", \"third\"], type=\"index\"),\n \"checkbox\",\n gr.Slider(0, 80, value=25),\n gr.CheckboxGroup([\"Sibling\", \"Child\"], label=\"Travelling with (select all)\"),\n gr.Number(value=20),\n gr.Radio([\"S\", \"C\", \"Q\"], type=\"index\"),\n ],\n \"label\",\n examples=[\n [\"first\", True, 30, [], 50, \"S\"],\n [\"second\", False, 40, [\"Sibling\", \"Child\"], 10, \"Q\"],\n [\"third\", True, 30, [\"Child\"], 20, \"S\"],\n ],\n interpretation=\"default\",\n live=True,\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ], - [ - "interface_random_slider", - "import gradio as gr\n\n\ndef func(slider_1, slider_2, *args):\n return slider_1 + slider_2 * 5\n\n\ndemo = gr.Interface(\n func,\n [\n gr.Slider(minimum=1.5, maximum=250000.89, randomize=True, label=\"Random Big Range\"),\n gr.Slider(minimum=-1, maximum=1, randomize=True, step=0.05, label=\"Random only multiple of 0.05 allowed\"),\n gr.Slider(minimum=0, maximum=1, randomize=True, step=0.25, label=\"Random only multiples of 0.25 allowed\"),\n gr.Slider(minimum=-100, maximum=100, randomize=True, step=3, label=\"Random between -100 and 100 step 3\"),\n gr.Slider(minimum=-100, maximum=100, randomize=True, label=\"Random between -100 and 100\"),\n gr.Slider(value=0.25, minimum=5, maximum=30, step=-1),\n ],\n \"number\",\n interpretation=\"default\"\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ], - [ - "blocks_random_slider", - "\nimport gradio as gr\n\n\ndef func(slider_1, slider_2):\n return slider_1 * 5 + slider_2\n\n\nwith gr.Blocks() as demo:\n slider = gr.Slider(minimum=-10.2, maximum=15, label=\"Random Slider (Static)\", randomize=True)\n slider_1 = gr.Slider(minimum=100, maximum=200, label=\"Random Slider (Input 1)\", randomize=True)\n 
slider_2 = gr.Slider(minimum=10, maximum=23.2, label=\"Random Slider (Input 2)\", randomize=True)\n slider_3 = gr.Slider(value=3, label=\"Non random slider\")\n btn = gr.Button(\"Run\")\n btn.click(func, inputs=[slider_1, slider_2], outputs=gr.Number())\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ] - ], - "guides": [ - { - "name": "create-your-own-friends-with-a-gan", - "category": "other-tutorials", - "pretty_category": "Other Tutorials", - "guide_index": null, - "absolute_index": 34, - "pretty_name": "Create Your Own Friends With A Gan", - "content": "# Create Your Own Friends with a GAN\n\n\n\n\n\n\n\n## Introduction\n\nIt seems that cryptocurrencies, [NFTs](https://www.nytimes.com/interactive/2022/03/18/technology/nft-guide.html), and the web3 movement are all the rage these days! Digital assets are being listed on marketplaces for astounding amounts of money, and just about every celebrity is debuting their own NFT collection. While your crypto assets [may be taxable, such as in Canada](https://www.canada.ca/en/revenue-agency/programs/about-canada-revenue-agency-cra/compliance/digital-currency/cryptocurrency-guide.html), today we'll explore some fun and tax-free ways to generate your own assortment of procedurally generated [CryptoPunks](https://www.larvalabs.com/cryptopunks).\n\nGenerative Adversarial Networks, often known just as *GANs*, are a specific class of deep-learning models that are designed to learn from an input dataset to create (*generate!*) new material that is convincingly similar to elements of the original training set. Famously, the website [thispersondoesnotexist.com](https://thispersondoesnotexist.com/) went viral with lifelike, yet synthetic, images of people generated with a model called StyleGAN2. GANs have gained traction in the machine learning world, and are now being used to generate all sorts of images, text, and even [music](https://salu133445.github.io/musegan/)!\n\nToday we'll briefly look at the high-level intuition behind GANs, and then we'll build a small demo around a pre-trained GAN to see what all the fuss is about. Here's a peek at what we're going to be putting together:\n\n\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). To use the pretrained model, also install `torch` and `torchvision`.\n\n## GANs: a very brief introduction\n\nOriginally proposed in [Goodfellow et al. 2014](https://arxiv.org/abs/1406.2661), GANs are made up of neural networks which compete with the intention of outsmarting each other. One network, known as the *generator*, is responsible for generating images. The other network, the *discriminator*, receives an image at a time from the generator along with a **real** image from the training data set. The discriminator then has to guess: which image is the fake?\n\nThe generator is constantly training to create images which are trickier for the discriminator to identify, while the discriminator raises the bar for the generator every time it correctly detects a fake. As the networks engage in this competitive (*adversarial!*) relationship, the images that get generated improve to the point where they become indistinguishable to human eyes!\n\nFor a more in-depth look at GANs, you can take a look at [this excellent post on Analytics Vidhya](https://www.analyticsvidhya.com/blog/2021/06/a-detailed-explanation-of-gan-with-implementation-using-tensorflow-and-keras/) or this [PyTorch tutorial](https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html). 
For now, though, we'll dive into a demo!\n\n## Step 1 \u2014 Create the Generator model\n\nTo generate new images with a GAN, you only need the generator model. There are many different architectures that the generator could use, but for this demo we'll use a pretrained GAN generator model with the following architecture:\n\n```python\nfrom torch import nn\n\nclass Generator(nn.Module):\n # Refer to the link below for explanations about nc, nz, and ngf\n # https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html#inputs\n def __init__(self, nc=4, nz=100, ngf=64):\n super(Generator, self).__init__()\n self.network = nn.Sequential(\n nn.ConvTranspose2d(nz, ngf * 4, 3, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 0, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n nn.Tanh(),\n )\n\n def forward(self, input):\n output = self.network(input)\n return output\n```\n\nWe're taking the generator from [this repo by @teddykoker](https://github.com/teddykoker/cryptopunks-gan/blob/main/train.py#L90), where you can also see the original discriminator model structure.\n\nAfter instantiating the model, we'll load in the weights from the Hugging Face Hub, stored at [nateraw/cryptopunks-gan](https://huggingface.co/nateraw/cryptopunks-gan):\n\n```python\nfrom huggingface_hub import hf_hub_download\nimport torch\n\nmodel = Generator()\nweights_path = hf_hub_download('nateraw/cryptopunks-gan', 'generator.pth')\nmodel.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))) # Use 'cuda' if you have a GPU available\n```\n\n## Step 2 \u2014 Defining a `predict` function\n\nThe `predict` function is the key to making Gradio work! Whatever inputs we choose through the Gradio interface will get passed through our `predict` function, which should operate on the inputs and generate outputs that we can display with Gradio output components. For GANs it's common to pass random noise into our model as the input, so we'll generate a tensor of random numbers and pass that through the model. We can then use `torchvision`'s `save_image` function to save the output of the model as a `png` file, and return the file name:\n\n```python\nfrom torchvision.utils import save_image\n\ndef predict(seed):\n num_punks = 4\n torch.manual_seed(seed)\n z = torch.randn(num_punks, 100, 1, 1)\n punks = model(z)\n save_image(punks, \"punks.png\", normalize=True)\n return 'punks.png'\n```\n\nWe're giving our `predict` function a `seed` parameter, so that we can fix the random tensor generation with a seed. We'll then be able to reproduce punks if we want to see them again by passing in the same seed.\n\n*Note!* Our model needs an input tensor of dimensions 100x1x1 to do a single inference, or (BatchSize)x100x1x1 for generating a batch of images. In this demo we'll start by generating 4 punks at a time.\n\n## Step 3 \u2014 Creating a Gradio interface\n\nAt this point you can even run the code you have with `predict()`, and you'll find your freshly generated punks in your file system at `./punks.png`. To make a truly interactive demo, though, we'll build out a simple interface with Gradio. 
Our goals here are to:\n\n* Set a slider input so users can choose the \"seed\" value\n* Use an image component for our output to showcase the generated punks\n* Use our `predict()` to take the seed and generate the images\n\nWith `gr.Interface()`, we can define all of that with a single function call:\n\n```python\nimport gradio as gr\n\ngr.Interface(\n predict,\n inputs=[\n gr.Slider(0, 1000, label='Seed', default=42),\n ],\n outputs=\"image\",\n).launch()\n```\n\nLaunching the interface should present you with something like this:\n\n\n\n## Step 4 \u2014 Even more punks!\n\nGenerating 4 punks at a time is a good start, but maybe we'd like to control how many we want to make each time. Adding more inputs to our Gradio interface is as simple as adding another item to the `inputs` list that we pass to `gr.Interface`:\n\n```python\ngr.Interface(\n predict,\n inputs=[\n gr.Slider(0, 1000, label='Seed', default=42),\n gr.Slider(4, 64, label='Number of Punks', step=1, default=10), # Adding another slider!\n ],\n outputs=\"image\",\n).launch()\n```\n\nThe new input will be passed to our `predict()` function, so we have to make some changes to that function to accept a new parameter:\n\n```python\ndef predict(seed, num_punks):\n torch.manual_seed(seed)\n z = torch.randn(num_punks, 100, 1, 1)\n punks = model(z)\n save_image(punks, \"punks.png\", normalize=True)\n return 'punks.png'\n```\n\nWhen you relaunch your interface, you should see a second slider that'll let you control the number of punks!\n\n## Step 5 - Polishing it up\n\nYour Gradio app is pretty much good to go, but you can add a few extra things to really make it ready for the spotlight \u2728\n\nWe can add some examples that users can easily try out by adding this to the `gr.Interface`:\n\n```python\ngr.Interface(\n # ...\n # keep everything as it is, and then add\n examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],\n).launch(cache_examples=True) # cache_examples is optional\n```\n\nThe `examples` parameter takes a list of lists, where each item in the sublists is ordered in the same order that we've listed the `inputs`. So in our case, `[seed, num_punks]`. Give it a try!\n\nYou can also try adding a `title`, `description`, and `article` to the `gr.Interface`. 
Each of those parameters accepts a string, so try it out and see what happens \ud83d\udc40 `article` will also accept HTML, as [explored in a previous guide](/guides/key-features/#descriptive-content)!\n\nWhen you're all done, you may end up with something like this:\n\n\n\nFor reference, here is our full code:\n\n```python\nimport torch\nfrom torch import nn\nfrom huggingface_hub import hf_hub_download\nfrom torchvision.utils import save_image\nimport gradio as gr\n\nclass Generator(nn.Module):\n # Refer to the link below for explanations about nc, nz, and ngf\n # https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html#inputs\n def __init__(self, nc=4, nz=100, ngf=64):\n super(Generator, self).__init__()\n self.network = nn.Sequential(\n nn.ConvTranspose2d(nz, ngf * 4, 3, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 0, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n nn.Tanh(),\n )\n\n def forward(self, input):\n output = self.network(input)\n return output\n\nmodel = Generator()\nweights_path = hf_hub_download('nateraw/cryptopunks-gan', 'generator.pth')\nmodel.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))) # Use 'cuda' if you have a GPU available\n\ndef predict(seed, num_punks):\n torch.manual_seed(seed)\n z = torch.randn(num_punks, 100, 1, 1)\n punks = model(z)\n save_image(punks, \"punks.png\", normalize=True)\n return 'punks.png'\n\ngr.Interface(\n predict,\n inputs=[\n gr.Slider(0, 1000, label='Seed', default=42),\n gr.Slider(4, 64, label='Number of Punks', step=1, default=10),\n ],\n outputs=\"image\",\n examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],\n).launch(cache_examples=True)\n```\n----------\n\nCongratulations! You've built out your very own GAN-powered CryptoPunks generator, with a fancy Gradio interface that makes it easy for anyone to use. Now you can [scour the Hub for more GANs](https://huggingface.co/models?other=gan) (or train your own) and continue making even more awesome demos \ud83e\udd17", - "html": "

Create Your Own Friends with a GAN

\n\n

Introduction

\n\n

It seems that cryptocurrencies, NFTs, and the web3 movement are all the rage these days! Digital assets are being listed on marketplaces for astounding amounts of money, and just about every celebrity is debuting their own NFT collection. While your crypto assets may be taxable, such as in Canada, today we'll explore some fun and tax-free ways to generate your own assortment of procedurally generated CryptoPunks.

\n\n

Generative Adversarial Networks, often known just as GANs, are a specific class of deep-learning models that are designed to learn from an input dataset to create (generate!) new material that is convincingly similar to elements of the original training set. Famously, the website thispersondoesnotexist.com went viral with lifelike, yet synthetic, images of people generated with a model called StyleGAN2. GANs have gained traction in the machine learning world, and are now being used to generate all sorts of images, text, and even music!

\n\n

Today we'll briefly look at the high-level intuition behind GANs, and then we'll build a small demo around a pre-trained GAN to see what all the fuss is about. Here's a peek at what we're going to be putting together:

\n\n\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed. To use the pretrained model, also install torch and torchvision.

\n\n
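For example, one way to install all three with pip (assuming a standard Python environment):

```bash
pip install gradio torch torchvision
```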

GANs: a very brief introduction

\n\n

Originally proposed in Goodfellow et al. 2014, GANs are made up of neural networks which compete with the intention of outsmarting each other. One network, known as the generator, is responsible for generating images. The other network, the discriminator, receives an image at a time from the generator along with a real image from the training data set. The discriminator then has to guess: which image is the fake?

\n\n

The generator is constantly training to create images which are trickier for the discriminator to identify, while the discriminator raises the bar for the generator every time it correctly detects a fake. As the networks engage in this competitive (adversarial!) relationship, the images that get generated improve to the point where they become indistinguishable to human eyes!

\n\n

For a more in-depth look at GANs, you can take a look at this excellent post on Analytics Vidhya or this PyTorch tutorial. For now, though, we'll dive into a demo!

\n\n

Step 1 \u2014 Create the Generator model

\n\n

To generate new images with a GAN, you only need the generator model. There are many different architectures that the generator could use, but for this demo we'll use a pretrained GAN generator model with the following architecture:

\n\n
from torch import nn\n\nclass Generator(nn.Module):\n    # Refer to the link below for explanations about nc, nz, and ngf\n    # https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html#inputs\n    def __init__(self, nc=4, nz=100, ngf=64):\n        super(Generator, self).__init__()\n        self.network = nn.Sequential(\n            nn.ConvTranspose2d(nz, ngf * 4, 3, 1, 0, bias=False),\n            nn.BatchNorm2d(ngf * 4),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False),\n            nn.BatchNorm2d(ngf * 2),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 0, bias=False),\n            nn.BatchNorm2d(ngf),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n            nn.Tanh(),\n        )\n\n    def forward(self, input):\n        output = self.network(input)\n        return output\n
\n\n

We're taking the generator from this repo by @teddykoker, where you can also see the original discriminator model structure.

\n\n

After instantiating the model, we'll load in the weights from the Hugging Face Hub, stored at nateraw/cryptopunks-gan:

\n\n
from huggingface_hub import hf_hub_download\nimport torch\n\nmodel = Generator()\nweights_path = hf_hub_download('nateraw/cryptopunks-gan', 'generator.pth')\nmodel.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))) # Use 'cuda' if you have a GPU available\n
\n\n

Step 2 \u2014 Defining a predict function

\n\n

The predict function is the key to making Gradio work! Whatever inputs we choose through the Gradio interface will get passed through our predict function, which should operate on the inputs and generate outputs that we can display with Gradio output components. For GANs it's common to pass random noise into our model as the input, so we'll generate a tensor of random numbers and pass that through the model. We can then use torchvision's save_image function to save the output of the model as a png file, and return the file name:

\n\n
from torchvision.utils import save_image\n\ndef predict(seed):\n    num_punks = 4\n    torch.manual_seed(seed)\n    z = torch.randn(num_punks, 100, 1, 1)\n    punks = model(z)\n    save_image(punks, \"punks.png\", normalize=True)\n    return 'punks.png'\n
\n\n

We're giving our predict function a seed parameter, so that we can fix the random tensor generation with a seed. We'll then be able to reproduce punks if we want to see them again by passing in the same seed.

\n\n

Note! Our model needs an input tensor of dimensions 100x1x1 to do a single inference, or (BatchSize)x100x1x1 for generating a batch of images. In this demo we'll start by generating 4 punks at a time.

\n\n
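To make the note above concrete, here is a small, optional sanity check (not part of the original guide's code) that reuses the model loaded above, fixes the seed, and confirms the expected shapes; with this architecture (nc=4), the punks come out as 4-channel 24x24 images:

```python
import torch

# With a fixed seed, the random input (and therefore the generated punks) is reproducible.
torch.manual_seed(42)
punks_a = model(torch.randn(4, 100, 1, 1))   # batch of 4 noise vectors, each 100x1x1

torch.manual_seed(42)
punks_b = model(torch.randn(4, 100, 1, 1))

print(punks_a.shape)                  # torch.Size([4, 4, 24, 24]): 4 punks, nc=4 channels, 24x24 pixels
print(torch.equal(punks_a, punks_b))  # True on CPU, since the seed fixes the random input
```

\n\n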

Step 3 \u2014 Creating a Gradio interface

\n\n

At this point you can even run the code you have with predict(<SOME_NUMBER>), and you'll find your freshly generated punks in your file system at ./punks.png. To make a truly interactive demo, though, we'll build out a simple interface with Gradio. Our goals here are to:

\n\n
    \n
  • Set a slider input so users can choose the \"seed\" value
  • \n
  • Use an image component for our output to showcase the generated punks
  • \n
  • Use our predict() to take the seed and generate the images
  • \n
\n\n

With gr.Interface(), we can define all of that with a single function call:

\n\n
import gradio as gr\n\ngr.Interface(\n    predict,\n    inputs=[\n        gr.Slider(0, 1000, label='Seed', default=42),\n    ],\n    outputs=\"image\",\n).launch()\n
\n\n

Launching the interface should present you with something like this:

\n\n\n\n

Step 4 \u2014 Even more punks!

\n\n

Generating 4 punks at a time is a good start, but maybe we'd like to control how many we want to make each time. Adding more inputs to our Gradio interface is as simple as adding another item to the inputs list that we pass to gr.Interface:

\n\n
gr.Interface(\n    predict,\n    inputs=[\n        gr.Slider(0, 1000, label='Seed', default=42),\n        gr.Slider(4, 64, label='Number of Punks', step=1, default=10), # Adding another slider!\n    ],\n    outputs=\"image\",\n).launch()\n
\n\n

The new input will be passed to our predict() function, so we have to make some changes to that function to accept a new parameter:

\n\n
def predict(seed, num_punks):\n    torch.manual_seed(seed)\n    z = torch.randn(num_punks, 100, 1, 1)\n    punks = model(z)\n    save_image(punks, \"punks.png\", normalize=True)\n    return 'punks.png'\n
\n\n

When you relaunch your interface, you should see a second slider that'll let you control the number of punks!

\n\n

Step 5 \u2014 Polishing it up

\n\n

Your Gradio app is pretty much good to go, but you can add a few extra things to really make it ready for the spotlight \u2728

\n\n

We can add some examples that users can easily try out by adding this to the gr.Interface:

\n\n
gr.Interface(\n    # ...\n    # keep everything as it is, and then add\n    examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],\n).launch(cache_examples=True) # cache_examples is optional\n
\n\n

The examples parameter takes a list of lists, where each sublist provides values for the inputs in the same order that we've listed them: in our case, [seed, num_punks]. Give it a try!

\n\n

You can also try adding a title, description, and article to the gr.Interface. Each of those parameters accepts a string, so try it out and see what happens \ud83d\udc40 The article parameter will also accept HTML, as explored in a previous guide!

\n\n
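As a sketch of what that could look like, the interface call below adds all three parameters; the title, description, and article strings are just placeholders you would replace with your own text:

```python
import gradio as gr

gr.Interface(
    predict,
    inputs=[
        gr.Slider(0, 1000, label='Seed', default=42),
        gr.Slider(4, 64, label='Number of Punks', step=1, default=10),
    ],
    outputs="image",
    examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],
    title="CryptoPunks GAN",  # placeholder title
    description="Generate CryptoPunk-style images from a random seed.",  # placeholder description
    article="<p>Weights from <a href='https://huggingface.co/nateraw/cryptopunks-gan'>nateraw/cryptopunks-gan</a>.</p>",  # article accepts HTML
).launch(cache_examples=True)
```

\n\n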

When you're all done, you may end up with something like this:

\n\n\n\n

For reference, here is our full code:

\n\n
import torch\nfrom torch import nn\nfrom huggingface_hub import hf_hub_download\nfrom torchvision.utils import save_image\nimport gradio as gr\n\nclass Generator(nn.Module):\n    # Refer to the link below for explanations about nc, nz, and ngf\n    # https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html#inputs\n    def __init__(self, nc=4, nz=100, ngf=64):\n        super(Generator, self).__init__()\n        self.network = nn.Sequential(\n            nn.ConvTranspose2d(nz, ngf * 4, 3, 1, 0, bias=False),\n            nn.BatchNorm2d(ngf * 4),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False),\n            nn.BatchNorm2d(ngf * 2),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 0, bias=False),\n            nn.BatchNorm2d(ngf),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n            nn.Tanh(),\n        )\n\n    def forward(self, input):\n        output = self.network(input)\n        return output\n\nmodel = Generator()\nweights_path = hf_hub_download('nateraw/cryptopunks-gan', 'generator.pth')\nmodel.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))) # Use 'cuda' if you have a GPU available\n\ndef predict(seed, num_punks):\n    torch.manual_seed(seed)\n    z = torch.randn(num_punks, 100, 1, 1)\n    punks = model(z)\n    save_image(punks, \"punks.png\", normalize=True)\n    return 'punks.png'\n\ngr.Interface(\n    predict,\n    inputs=[\n        gr.Slider(0, 1000, label='Seed', default=42),\n        gr.Slider(4, 64, label='Number of Punks', step=1, default=10),\n    ],\n    outputs=\"image\",\n    examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],\n).launch(cache_examples=True)\n
\n\n
\n\n

Congratulations! You've built out your very own GAN-powered CryptoPunks generator, with a fancy Gradio interface that makes it easy for anyone to use. Now you can scour the Hub for more GANs (or train your own) and continue making even more awesome demos \ud83e\udd17

\n", - "tags": ["GAN", "IMAGE", "HUB"], - "spaces": [ - "https://huggingface.co/spaces/NimaBoscarino/cryptopunks", - "https://huggingface.co/spaces/nateraw/cryptopunks-generator" - ], - "url": "/guides/create-your-own-friends-with-a-gan/", - "contributor": "Nima Boscarino and Nate Raw" - } - ], - "preprocessing": "passes slider value as a float into the function.", - "postprocessing": "expects an int or float returned from function and sets slider value to it as long as it is within range.", - "examples-format": "A float or int representing the slider's value.", - "parent": "gradio", - "prev_obj": "ScatterPlot", - "next_obj": "State" - }, - "state": { - "class": null, - "name": "State", - "description": "Special hidden component that stores session state across runs of the demo by the same user. The value of the State variable is cleared when the user refreshes the page.
", - "tags": { - "preprocessing": "No preprocessing is performed", - "postprocessing": "No postprocessing is performed", - "demos": "blocks_simple_squares", - "guides": "real-time-speech-recognition" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "value", - "annotation": "Any", - "doc": "the initial value (of arbitrary type) of the state. The provided argument is deepcopied. If a callable is provided, the function will be called whenever the app loads to set the initial value of the state.", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [], - "demos": [ - [ - "blocks_simple_squares", - "import gradio as gr\n\ndemo = gr.Blocks(css=\"\"\"#btn {color: red} .abc {font-family: \"Comic Sans MS\", \"Comic Sans\", cursive !important}\"\"\")\n\nwith demo:\n default_json = {\"a\": \"a\"}\n\n num = gr.State(value=0)\n squared = gr.Number(value=0)\n btn = gr.Button(\"Next Square\", elem_id=\"btn\", elem_classes=[\"abc\", \"def\"])\n\n stats = gr.State(value=default_json)\n table = gr.JSON()\n\n def increase(var, stats_history):\n var += 1\n stats_history[str(var)] = var**2\n return var, var**2, stats_history, stats_history\n\n btn.click(increase, [num, stats], [num, squared, stats, table])\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ] - ], - "guides": [ - { - "name": "real-time-speech-recognition", - "category": "other-tutorials", - "pretty_category": "Other Tutorials", - "guide_index": null, - "absolute_index": 40, - "pretty_name": "Real Time Speech Recognition", - "content": "# Real Time Speech Recognition \n\n\n\n\n## Introduction\n\nAutomatic speech recognition (ASR), the conversion of spoken speech to text, is a very important and thriving area of machine learning. ASR algorithms run on practically every smartphone, and are becoming increasingly embedded in professional workflows, such as digital assistants for nurses and doctors. Because ASR algorithms are designed to be used directly by customers and end users, it is important to validate that they are behaving as expected when confronted with a wide variety of speech patterns (different accents, pitches, and background audio conditions).\n\nUsing `gradio`, you can easily build a demo of your ASR model and share that with a testing team, or test it yourself by speaking through the microphone on your device.\n\nThis tutorial will show how to take a pretrained speech-to-text model and deploy it with a Gradio interface. We will start with a ***full-context*** model, in which the user speaks the entire audio before the prediction runs. Then we will adapt the demo to make it ***streaming***, meaning that the audio model will convert speech as you speak. The streaming demo that we create will look something like this (try it below or [in a new tab](https://huggingface.co/spaces/abidlabs/streaming-asr-paused)!):\n\n\n\nReal-time ASR is inherently *stateful*, meaning that the model's predictions change depending on what words the user previously spoke. So, in this tutorial, we will also cover how to use **state** with Gradio demos. \n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). You will also need a pretrained speech recognition model. 
In this tutorial, we will build demos from 2 ASR libraries:\n\n* Transformers (for this, `pip install transformers` and `pip install torch`) \n* DeepSpeech (`pip install deepspeech==0.8.2`)\n\nMake sure you have at least one of these installed so that you can follow along the tutorial. You will also need `ffmpeg` [installed on your system](https://www.ffmpeg.org/download.html), if you do not already have it, to process files from the microphone.\n\nHere's how to build a real time speech recognition (ASR) app: \n\n1. [Set up the Transformers ASR Model](#1-set-up-the-transformers-asr-model)\n2. [Create a Full-Context ASR Demo with Transformers](#2-create-a-full-context-asr-demo-with-transformers) \n3. [Create a Streaming ASR Demo with Transformers](#3-create-a-streaming-asr-demo-with-transformers)\n4. [Create a Streaming ASR Demo with DeepSpeech](#4-create-a-streaming-asr-demo-with-deep-speech)\n\n\n## 1. Set up the Transformers ASR Model\n\nFirst, you will need to have an ASR model that you have either trained yourself or you will need to download a pretrained model. In this tutorial, we will start by using a pretrained ASR model from the Hugging Face model, `Wav2Vec2`. \n\nHere is the code to load `Wav2Vec2` from Hugging Face `transformers`.\n\n```python\nfrom transformers import pipeline\n\np = pipeline(\"automatic-speech-recognition\")\n```\n\nThat's it! By default, the automatic speech recognition model pipeline loads Facebook's `facebook/wav2vec2-base-960h` model.\n\n## 2. Create a Full-Context ASR Demo with Transformers \n\nWe will start by creating a *full-context* ASR demo, in which the user speaks the full audio before using the ASR model to run inference. This is very easy with Gradio -- we simply create a function around the `pipeline` object above.\n\nWe will use `gradio`'s built in `Audio` component, configured to take input from the user's microphone and return a filepath for the recorded audio. The output component will be a plain `Textbox`.\n\n```python\nimport gradio as gr\n\ndef transcribe(audio):\n text = p(audio)[\"text\"]\n return text\n\ngr.Interface(\n fn=transcribe, \n inputs=gr.Audio(source=\"microphone\", type=\"filepath\"), \n outputs=\"text\").launch()\n```\n\nSo what's happening here? The `transcribe` function takes a single parameter, `audio`, which is a filepath to the audio file that the user has recorded. The `pipeline` object expects a filepath and converts it to text, which is returned to the frontend and displayed in a textbox. \n\nLet's see it in action! (Record a short audio clip and then click submit, or [open in a new tab](https://huggingface.co/spaces/abidlabs/full-context-asr)):\n\n\n\n## 3. Create a Streaming ASR Demo with Transformers\n\nOk great! We've built an ASR model that works well for short audio clips. However, if you are recording longer audio clips, you probably want a *streaming* interface, one that transcribes audio as the user speaks instead of just all-at-once at the end.\n\nThe good news is that it's not too difficult to adapt the demo we just made to make it streaming, using the same `Wav2Vec2` model. \n\nThe biggest change is that we must now introduce a `state` parameter, which holds the audio that has been *transcribed so far*. This allows us to only the latest chunk of audio and simply append it to the audio we previously transcribed. 
\n\nWhen adding state to a Gradio demo, you need to do a total of 3 things:\n\n* Add a `state` parameter to the function\n* Return the updated `state` at the end of the function\n* Add the `\"state\"` components to the `inputs` and `outputs` in `Interface` \n\nHere's what the code looks like:\n\n```python\ndef transcribe(audio, state=\"\"):\n text = p(audio)[\"text\"]\n state += text + \" \"\n return state, state\n\n# Set the starting state to an empty string\n\ngr.Interface(\n fn=transcribe, \n inputs=[\n gr.Audio(source=\"microphone\", type=\"filepath\", streaming=True), \n \"state\" \n ],\n outputs=[\n \"textbox\",\n \"state\"\n ],\n live=True).launch()\n```\n\nNotice that we've also made one other change, which is that we've set `live=True`. This keeps the Gradio interface running constantly, so it automatically transcribes audio without the user having to repeatedly hit the submit button.\n\nLet's see how it does (try below or [in a new tab](https://huggingface.co/spaces/abidlabs/streaming-asr))!\n\n\n\n\nOne thing that you may notice is that the transcription quality has dropped since the chunks of audio are so small, they lack the context to properly be transcribed. A \"hacky\" fix to this is to simply increase the runtime of the `transcribe()` function so that longer audio chunks are processed. We can do this by adding a `time.sleep()` inside the function, as shown below (we'll see a proper fix next) \n\n```python\nfrom transformers import pipeline\nimport gradio as gr\nimport time\n\np = pipeline(\"automatic-speech-recognition\")\n\ndef transcribe(audio, state=\"\"):\n time.sleep(2)\n text = p(audio)[\"text\"]\n state += text + \" \"\n return state, state\n\ngr.Interface(\n fn=transcribe, \n inputs=[\n gr.Audio(source=\"microphone\", type=\"filepath\", streaming=True), \n \"state\"\n ],\n outputs=[\n \"textbox\",\n \"state\"\n ],\n live=True).launch()\n```\n\nTry the demo below to see the difference (or [open in a new tab](https://huggingface.co/spaces/abidlabs/streaming-asr-paused))!\n\n\n\n\n## 4. Create a Streaming ASR Demo with DeepSpeech\n\nYou're not restricted to ASR models from the `transformers` library -- you can use your own models or models from other libraries. The `DeepSpeech` library contains models that are specifically designed to handle streaming audio data. These models perform really well with streaming data as they are able to account for previous chunks of audio data when making predictions.\n\nGoing through the DeepSpeech library is beyond the scope of this Guide (check out their [excellent documentation here](https://deepspeech.readthedocs.io/en/r0.9/)), but you can use Gradio very similarly with a DeepSpeech ASR model as with a Transformers ASR model. 
\n\nHere's a complete example (on Linux):\n\nFirst install the DeepSpeech library and download the pretrained models from the terminal:\n\n```bash\nwget https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-models.pbmm\nwget https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-models.scorer\napt install libasound2-dev portaudio19-dev libportaudio2 libportaudiocpp0 ffmpeg\npip install deepspeech==0.8.2\n```\n\nThen, create a similar `transcribe()` function as before:\n\n```python\nfrom deepspeech import Model\nimport numpy as np\n\nmodel_file_path = \"deepspeech-0.8.2-models.pbmm\"\nlm_file_path = \"deepspeech-0.8.2-models.scorer\"\nbeam_width = 100\nlm_alpha = 0.93\nlm_beta = 1.18\n\nmodel = Model(model_file_path)\nmodel.enableExternalScorer(lm_file_path)\nmodel.setScorerAlphaBeta(lm_alpha, lm_beta)\nmodel.setBeamWidth(beam_width)\n\n\ndef reformat_freq(sr, y):\n if sr not in (\n 48000,\n 16000,\n ): # Deepspeech only supports 16k, (we convert 48k -> 16k)\n raise ValueError(\"Unsupported rate\", sr)\n if sr == 48000:\n y = (\n ((y / max(np.max(y), 1)) * 32767)\n .reshape((-1, 3))\n .mean(axis=1)\n .astype(\"int16\")\n )\n sr = 16000\n return sr, y\n\n\ndef transcribe(speech, stream):\n _, y = reformat_freq(*speech)\n if stream is None:\n stream = model.createStream()\n stream.feedAudioContent(y)\n text = stream.intermediateDecode()\n return text, stream\n\n```\n\nThen, create a Gradio Interface as before (the only difference being that the return type should be `numpy` instead of a `filepath` to be compatible with the DeepSpeech models)\n\n```python\nimport gradio as gr\n\ngr.Interface(\n fn=transcribe, \n inputs=[\n gr.Audio(source=\"microphone\", type=\"numpy\"), \n \"state\" \n ], \n outputs= [\n \"text\", \n \"state\"\n ], \n live=True).launch()\n```\n\nRunning all of this should allow you to deploy your realtime ASR model with a nice GUI. Try it out and see how well it works for you.\n\n--------------------------------------------\n\n\nAnd you're done! That's all the code you need to build a web-based GUI for your ASR model. \n\nFun tip: you can share your ASR model instantly with others simply by setting `share=True` in `launch()`. \n\n\n", - "html": "

Real Time Speech Recognition

\n\n

Introduction

\n\n

Automatic speech recognition (ASR), the conversion of spoken language to text, is a very important and thriving area of machine learning. ASR algorithms run on practically every smartphone, and are becoming increasingly embedded in professional workflows, such as digital assistants for nurses and doctors. Because ASR algorithms are designed to be used directly by customers and end users, it is important to validate that they are behaving as expected when confronted with a wide variety of speech patterns (different accents, pitches, and background audio conditions).

\n\n

Using gradio, you can easily build a demo of your ASR model and share that with a testing team, or test it yourself by speaking through the microphone on your device.

\n\n

This tutorial will show how to take a pretrained speech-to-text model and deploy it with a Gradio interface. We will start with a full-context model, in which the user speaks the entire audio before the prediction runs. Then we will adapt the demo to make it streaming, meaning that the audio model will convert speech as you speak. The streaming demo that we create will look something like this (try it below or in a new tab!):

\n\n\n\n

Real-time ASR is inherently stateful, meaning that the model's predictions change depending on what words the user previously spoke. So, in this tutorial, we will also cover how to use state with Gradio demos.

\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed. You will also need a pretrained speech recognition model. In this tutorial, we will build demos from 2 ASR libraries:

\n\n
    \n
  • Transformers (for this, pip install transformers and pip install torch)
  • \n
  • DeepSpeech (pip install deepspeech==0.8.2)
  • \n
\n\n

Make sure you have at least one of these installed so that you can follow along with the tutorial. You will also need ffmpeg installed on your system, if you do not already have it, to process files from the microphone.

\n\n

Here's how to build a real-time speech recognition (ASR) app:

\n\n
    \n
  1. Set up the Transformers ASR Model
  2. \n
  3. Create a Full-Context ASR Demo with Transformers
  4. \n
  5. Create a Streaming ASR Demo with Transformers
  6. \n
  7. Create a Streaming ASR Demo with DeepSpeech
  8. \n
\n\n

1. Set up the Transformers ASR Model

\n\n

First, you will need an ASR model that you have either trained yourself or downloaded as a pretrained model. In this tutorial, we will start with Wav2Vec2, a pretrained ASR model from Hugging Face.

\n\n

Here is the code to load Wav2Vec2 from Hugging Face transformers.

\n\n
from transformers import pipeline\n\np = pipeline(\"automatic-speech-recognition\")\n
\n\n

That's it! By default, the automatic speech recognition model pipeline loads Facebook's facebook/wav2vec2-base-960h model.

\n\n
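If you prefer to be explicit about which checkpoint is used (or want to swap in another one later), you can pass the model name yourself; this sketch simply spells out the default mentioned above:

```python
from transformers import pipeline

# Equivalent to the default pipeline above, with the checkpoint named explicitly.
p = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")
```

\n\n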

2. Create a Full-Context ASR Demo with Transformers

\n\n

We will start by creating a full-context ASR demo, in which the user speaks the full audio before using the ASR model to run inference. This is very easy with Gradio -- we simply create a function around the pipeline object above.

\n\n

We will use gradio's built-in Audio component, configured to take input from the user's microphone and return a filepath for the recorded audio. The output component will be a plain Textbox.

\n\n
import gradio as gr\n\ndef transcribe(audio):\n    text = p(audio)[\"text\"]\n    return text\n\ngr.Interface(\n    fn=transcribe, \n    inputs=gr.Audio(source=\"microphone\", type=\"filepath\"), \n    outputs=\"text\").launch()\n
\n\n

So what's happening here? The transcribe function takes a single parameter, audio, which is a filepath to the audio file that the user has recorded. The pipeline object expects a filepath and converts it to text, which is returned to the frontend and displayed in a textbox.

\n\n
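If you'd like to sanity-check transcribe() outside of Gradio first, you can call it directly on a short audio file; the path below is just a hypothetical example:

```python
# "sample.wav" is a placeholder for any short recording on your machine
# (ffmpeg must be installed so the pipeline can decode it).
print(transcribe("sample.wav"))
```

\n\n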

Let's see it in action! (Record a short audio clip and then click submit, or open in a new tab):

\n\n\n\n

3. Create a Streaming ASR Demo with Transformers

\n\n

Ok great! We've built an ASR model that works well for short audio clips. However, if you are recording longer audio clips, you probably want a streaming interface, one that transcribes audio as the user speaks instead of just all-at-once at the end.

\n\n

The good news is that it's not too difficult to adapt the demo we just made to make it streaming, using the same Wav2Vec2 model.

\n\n

The biggest change is that we must now introduce a state parameter, which holds the transcription so far. This allows us to transcribe only the latest chunk of audio and simply append the new text to what we have previously transcribed.

\n\n

When adding state to a Gradio demo, you need to do a total of 3 things:

\n\n
    \n
  • Add a state parameter to the function
  • \n
  • Return the updated state at the end of the function
  • \n
  • Add the \"state\" components to the inputs and outputs in Interface
  • \n
\n\n

Here's what the code looks like:

\n\n
def transcribe(audio, state=\"\"):\n    text = p(audio)[\"text\"]\n    state += text + \" \"\n    return state, state\n\n# Set the starting state to an empty string\n\ngr.Interface(\n    fn=transcribe, \n    inputs=[\n        gr.Audio(source=\"microphone\", type=\"filepath\", streaming=True), \n        \"state\" \n    ],\n    outputs=[\n        \"textbox\",\n        \"state\"\n    ],\n    live=True).launch()\n
\n\n

Notice that we've also made one other change, which is that we've set live=True. This keeps the Gradio interface running constantly, so it automatically transcribes audio without the user having to repeatedly hit the submit button.

\n\n

Let's see how it does (try below or in a new tab)!

\n\n\n\n

One thing that you may notice is that the transcription quality has dropped: because the chunks of audio are so small, they lack the context to be transcribed properly. A \"hacky\" fix is to simply increase the runtime of the transcribe() function so that longer audio chunks are processed. We can do this by adding a time.sleep() inside the function, as shown below (we'll see a proper fix next).

\n\n
from transformers import pipeline\nimport gradio as gr\nimport time\n\np = pipeline(\"automatic-speech-recognition\")\n\ndef transcribe(audio, state=\"\"):\n    time.sleep(2)\n    text = p(audio)[\"text\"]\n    state += text + \" \"\n    return state, state\n\ngr.Interface(\n    fn=transcribe, \n    inputs=[\n        gr.Audio(source=\"microphone\", type=\"filepath\", streaming=True), \n        \"state\"\n    ],\n    outputs=[\n        \"textbox\",\n        \"state\"\n    ],\n    live=True).launch()\n
\n\n

Try the demo below to see the difference (or open in a new tab)!

\n\n\n\n

4. Create a Streaming ASR Demo with DeepSpeech

\n\n

You're not restricted to ASR models from the transformers library -- you can use your own models or models from other libraries. The DeepSpeech library contains models that are specifically designed to handle streaming audio data. These models perform really well with streaming data as they are able to account for previous chunks of audio data when making predictions.

\n\n

Going through the DeepSpeech library is beyond the scope of this Guide (check out their excellent documentation here), but you can use Gradio very similarly with a DeepSpeech ASR model as with a Transformers ASR model.

\n\n

Here's a complete example (on Linux):

\n\n

First install the DeepSpeech library and download the pretrained models from the terminal:

\n\n
wget https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-models.pbmm\nwget https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-models.scorer\napt install libasound2-dev portaudio19-dev libportaudio2 libportaudiocpp0 ffmpeg\npip install deepspeech==0.8.2\n
\n\n

Then, create a similar transcribe() function as before:

\n\n
from deepspeech import Model\nimport numpy as np\n\nmodel_file_path = \"deepspeech-0.8.2-models.pbmm\"\nlm_file_path = \"deepspeech-0.8.2-models.scorer\"\nbeam_width = 100\nlm_alpha = 0.93\nlm_beta = 1.18\n\nmodel = Model(model_file_path)\nmodel.enableExternalScorer(lm_file_path)\nmodel.setScorerAlphaBeta(lm_alpha, lm_beta)\nmodel.setBeamWidth(beam_width)\n\n\ndef reformat_freq(sr, y):\n    if sr not in (\n        48000,\n        16000,\n    ):  # Deepspeech only supports 16k, (we convert 48k -> 16k)\n        raise ValueError(\"Unsupported rate\", sr)\n    if sr == 48000:\n        y = (\n            ((y / max(np.max(y), 1)) * 32767)\n            .reshape((-1, 3))\n            .mean(axis=1)\n            .astype(\"int16\")\n        )\n        sr = 16000\n    return sr, y\n\n\ndef transcribe(speech, stream):\n    _, y = reformat_freq(*speech)\n    if stream is None:\n        stream = model.createStream()\n    stream.feedAudioContent(y)\n    text = stream.intermediateDecode()\n    return text, stream\n\n
\n\n

Then, create a Gradio Interface as before (the only difference being that the return type should be numpy instead of a filepath to be compatible with the DeepSpeech models)

\n\n
import gradio as gr\n\ngr.Interface(\n    fn=transcribe, \n    inputs=[\n        gr.Audio(source=\"microphone\", type=\"numpy\"), \n        \"state\" \n    ], \n    outputs= [\n        \"text\", \n        \"state\"\n    ], \n    live=True).launch()\n
\n\n

Running all of this should allow you to deploy your real-time ASR model with a nice GUI. Try it out and see how well it works for you.

\n\n
\n\n

And you're done! That's all the code you need to build a web-based GUI for your ASR model.

\n\n

Fun tip: you can share your ASR model instantly with others simply by setting share=True in launch().
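\n\n

As a minimal sketch, sharing only requires changing the launch() call at the end of any of the demos above, for example:

```python
gr.Interface(
    fn=transcribe,
    inputs=[gr.Audio(source="microphone", type="filepath", streaming=True), "state"],
    outputs=["textbox", "state"],
    live=True,
).launch(share=True)  # prints a temporary public URL you can send to others
```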

\n", - "tags": ["ASR", "SPEECH", "STREAMING"], - "spaces": [ - "https://huggingface.co/spaces/abidlabs/streaming-asr-paused", - "https://huggingface.co/spaces/abidlabs/full-context-asr" - ], - "url": "/guides/real-time-speech-recognition/", - "contributor": null - } - ], - "preprocessing": "No preprocessing is performed", - "postprocessing": "No postprocessing is performed", - "parent": "gradio", - "prev_obj": "Slider", - "next_obj": "Textbox" - }, - "textbox": { - "class": null, - "name": "Textbox", - "description": "Creates a textarea for user to enter string input or display string output.
", - "tags": { - "preprocessing": "passes textarea value as a {str} into the function.", - "postprocessing": "expects a {str} returned from function and sets textarea value to it.", - "examples-format": "a {str} representing the textbox input.", - "demos": "hello_world, diff_texts, sentence_builder", - "guides": "creating-a-chatbot, real-time-speech-recognition" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "value", - "annotation": "str | Callable | None", - "doc": "default text to provide in textarea. If callable, the function will be called whenever the app loads to set the initial value of the component.", - "default": "\"\"" - }, - { - "name": "lines", - "annotation": "int", - "doc": "minimum number of line rows to provide in textarea.", - "default": "1" - }, - { - "name": "max_lines", - "annotation": "int", - "doc": "maximum number of line rows to provide in textarea.", - "default": "20" - }, - { - "name": "placeholder", - "annotation": "str | None", - "doc": "placeholder hint to provide behind textarea.", - "default": "None" - }, - { - "name": "label", - "annotation": "str | None", - "doc": "component name in interface.", - "default": "None" - }, - { - "name": "info", - "annotation": "str | None", - "doc": "additional component description.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", - "default": "None" - }, - { - "name": "show_label", - "annotation": "bool | None", - "doc": "if True, will display label.", - "default": "None" - }, - { - "name": "container", - "annotation": "bool", - "doc": "If True, will place the component in a container - providing some extra padding around the border.", - "default": "True" - }, - { - "name": "scale", - "annotation": "int | None", - "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", - "default": "None" - }, - { - "name": "min_width", - "annotation": "int", - "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", - "default": "160" - }, - { - "name": "interactive", - "annotation": "bool | None", - "doc": "if True, will be rendered as an editable textbox; if False, editing will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.", - "default": "None" - }, - { - "name": "visible", - "annotation": "bool", - "doc": "If False, component will be hidden.", - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "autofocus", - "annotation": "bool", - "doc": "If True, will focus on the textbox when the page loads.", - "default": "False" - }, - { - "name": "elem_classes", - "annotation": "list[str] | str | None", - "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. 
Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "type", - "annotation": "Literal['text', 'password', 'email']", - "doc": "The type of textbox. One of: 'text', 'password', 'email', Default is 'text'.", - "default": "\"text\"" - }, - { - "name": "text_align", - "annotation": "Literal['left', 'right'] | None", - "doc": "How to align the text in the textbox, can be: \"left\", \"right\", or None (default). If None, the alignment is left if `rtl` is False, or right if `rtl` is True. Can only be changed if `type` is \"text\".", - "default": "None" - }, - { - "name": "rtl", - "annotation": "bool", - "doc": "If True and `type` is \"text\", sets the direction of the text to right-to-left (cursor appears on the left of the text). Default is False, which renders cursor on the right.", - "default": "False" - }, - { - "name": "show_copy_button", - "annotation": "bool", - "doc": "If True, includes a copy button to copy the text in the textbox. Only applies if show_label is True.", - "default": "False" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [ - { - "fn": null, - "name": "change", - "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. 
If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Textbox" - }, - { - "fn": null, - "name": "input", - "description": "This listener is triggered when the user changes the value of the component. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Textbox" - }, - { - "fn": null, - "name": "submit", - "description": "This listener is triggered when the user presses the Enter key while the component (e.g. a textbox) is focused. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." 
- }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Textbox" - }, - { - "fn": null, - "name": "focus", - "description": "This listener is triggered when the component is focused (e.g. when the user clicks inside a textbox). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Textbox" - }, - { - "fn": null, - "name": "blur", - "description": "This listener is triggered when the component's is unfocused/blurred (e.g. when the user clicks outside of a textbox). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. 
If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Textbox" - }, - { - "fn": null, - "name": "select", - "description": "Event listener for when the user selects text in the Textbox. Uses event data gradio.SelectData to carry `value` referring to selected substring, and `index` tuple referring to selected range endpoints. See EventData documentation on how to use this event data.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. 
If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Textbox" - } - ], - "string_shortcuts": [ - ["Textbox", "textbox", "Uses default values"], - ["TextArea", "textarea", "Uses lines=7"] - ], - "demos": [ - [ - "hello_world", - "import gradio as gr\n\ndef greet(name):\n return \"Hello \" + name + \"!\"\n\ndemo = gr.Interface(fn=greet, inputs=\"text\", outputs=\"text\")\n \nif __name__ == \"__main__\":\n demo.launch() " - ], - [ - "diff_texts", - "from difflib import Differ\n\nimport gradio as gr\n\n\ndef diff_texts(text1, text2):\n d = Differ()\n return [\n (token[2:], token[0] if token[0] != \" \" else None)\n for token in d.compare(text1, text2)\n ]\n\n\ndemo = gr.Interface(\n diff_texts,\n [\n gr.Textbox(\n label=\"Text 1\",\n info=\"Initial text\",\n lines=3,\n value=\"The quick brown fox jumped over the lazy dogs.\",\n ),\n gr.Textbox(\n label=\"Text 2\",\n info=\"Text to compare\",\n lines=3,\n value=\"The fast brown fox jumps over lazy dogs.\",\n ),\n ],\n gr.HighlightedText(\n label=\"Diff\",\n combine_adjacent=True,\n show_legend=True,\n ).style(color_map={\"+\": \"red\", \"-\": \"green\"}),\n theme=gr.themes.Base()\n)\nif __name__ == \"__main__\":\n demo.launch()\n" - ], - [ - "sentence_builder", - "import gradio as gr\n\n\ndef sentence_builder(quantity, animal, countries, place, activity_list, morning):\n return f\"\"\"The {quantity} {animal}s from {\" and \".join(countries)} went to the {place} where they {\" and \".join(activity_list)} until the {\"morning\" if morning else \"night\"}\"\"\"\n\n\ndemo = gr.Interface(\n sentence_builder,\n [\n gr.Slider(2, 20, value=4, label=\"Count\", info=\"Choose between 2 and 20\"),\n gr.Dropdown(\n [\"cat\", \"dog\", \"bird\"], label=\"Animal\", info=\"Will add more animals later!\"\n ),\n gr.CheckboxGroup([\"USA\", \"Japan\", \"Pakistan\"], label=\"Countries\", info=\"Where are they from?\"),\n gr.Radio([\"park\", \"zoo\", \"road\"], label=\"Location\", info=\"Where did they go?\"),\n gr.Dropdown(\n [\"ran\", \"swam\", \"ate\", \"slept\"], value=[\"swam\", \"slept\"], multiselect=True, label=\"Activity\", info=\"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed auctor, nisl eget ultricies aliquam, nunc nisl aliquet nunc, eget aliquam nisl nunc vel nisl.\"\n ),\n gr.Checkbox(label=\"Morning\", info=\"Did they do it in the morning?\"),\n ],\n \"text\",\n examples=[\n [2, \"cat\", [\"Japan\", \"Pakistan\"], \"park\", [\"ate\", \"swam\"], True],\n [4, \"dog\", [\"Japan\"], \"zoo\", [\"ate\", \"swam\"], False],\n [10, \"bird\", [\"USA\", \"Pakistan\"], \"road\", [\"ran\"], False],\n [8, \"cat\", [\"Pakistan\"], \"zoo\", [\"ate\"], True],\n ]\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ] - ], - "guides": [ - { - "name": "real-time-speech-recognition", - "category": "other-tutorials", - "pretty_category": "Other Tutorials", - "guide_index": null, - "absolute_index": 40, - "pretty_name": "Real Time Speech Recognition", - "content": "# Real Time Speech Recognition \n\n\n\n\n## Introduction\n\nAutomatic speech recognition (ASR), the conversion of spoken speech to text, is a very important and thriving area of machine learning. ASR algorithms run on practically every smartphone, and are becoming increasingly embedded in professional workflows, such as digital assistants for nurses and doctors. 
Because ASR algorithms are designed to be used directly by customers and end users, it is important to validate that they are behaving as expected when confronted with a wide variety of speech patterns (different accents, pitches, and background audio conditions).\n\nUsing `gradio`, you can easily build a demo of your ASR model and share that with a testing team, or test it yourself by speaking through the microphone on your device.\n\nThis tutorial will show how to take a pretrained speech-to-text model and deploy it with a Gradio interface. We will start with a ***full-context*** model, in which the user speaks the entire audio before the prediction runs. Then we will adapt the demo to make it ***streaming***, meaning that the audio model will convert speech as you speak. The streaming demo that we create will look something like this (try it below or [in a new tab](https://huggingface.co/spaces/abidlabs/streaming-asr-paused)!):\n\n\n\nReal-time ASR is inherently *stateful*, meaning that the model's predictions change depending on what words the user previously spoke. So, in this tutorial, we will also cover how to use **state** with Gradio demos. \n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). You will also need a pretrained speech recognition model. In this tutorial, we will build demos from 2 ASR libraries:\n\n* Transformers (for this, `pip install transformers` and `pip install torch`) \n* DeepSpeech (`pip install deepspeech==0.8.2`)\n\nMake sure you have at least one of these installed so that you can follow along the tutorial. You will also need `ffmpeg` [installed on your system](https://www.ffmpeg.org/download.html), if you do not already have it, to process files from the microphone.\n\nHere's how to build a real time speech recognition (ASR) app: \n\n1. [Set up the Transformers ASR Model](#1-set-up-the-transformers-asr-model)\n2. [Create a Full-Context ASR Demo with Transformers](#2-create-a-full-context-asr-demo-with-transformers) \n3. [Create a Streaming ASR Demo with Transformers](#3-create-a-streaming-asr-demo-with-transformers)\n4. [Create a Streaming ASR Demo with DeepSpeech](#4-create-a-streaming-asr-demo-with-deep-speech)\n\n\n## 1. Set up the Transformers ASR Model\n\nFirst, you will need to have an ASR model that you have either trained yourself or you will need to download a pretrained model. In this tutorial, we will start by using a pretrained ASR model from the Hugging Face model, `Wav2Vec2`. \n\nHere is the code to load `Wav2Vec2` from Hugging Face `transformers`.\n\n```python\nfrom transformers import pipeline\n\np = pipeline(\"automatic-speech-recognition\")\n```\n\nThat's it! By default, the automatic speech recognition model pipeline loads Facebook's `facebook/wav2vec2-base-960h` model.\n\n## 2. Create a Full-Context ASR Demo with Transformers \n\nWe will start by creating a *full-context* ASR demo, in which the user speaks the full audio before using the ASR model to run inference. This is very easy with Gradio -- we simply create a function around the `pipeline` object above.\n\nWe will use `gradio`'s built in `Audio` component, configured to take input from the user's microphone and return a filepath for the recorded audio. 
The output component will be a plain `Textbox`.\n\n```python\nimport gradio as gr\n\ndef transcribe(audio):\n text = p(audio)[\"text\"]\n return text\n\ngr.Interface(\n fn=transcribe, \n inputs=gr.Audio(source=\"microphone\", type=\"filepath\"), \n outputs=\"text\").launch()\n```\n\nSo what's happening here? The `transcribe` function takes a single parameter, `audio`, which is a filepath to the audio file that the user has recorded. The `pipeline` object expects a filepath and converts it to text, which is returned to the frontend and displayed in a textbox. \n\nLet's see it in action! (Record a short audio clip and then click submit, or [open in a new tab](https://huggingface.co/spaces/abidlabs/full-context-asr)):\n\n\n\n## 3. Create a Streaming ASR Demo with Transformers\n\nOk great! We've built an ASR model that works well for short audio clips. However, if you are recording longer audio clips, you probably want a *streaming* interface, one that transcribes audio as the user speaks instead of just all-at-once at the end.\n\nThe good news is that it's not too difficult to adapt the demo we just made to make it streaming, using the same `Wav2Vec2` model. \n\nThe biggest change is that we must now introduce a `state` parameter, which holds the audio that has been *transcribed so far*. This allows us to only the latest chunk of audio and simply append it to the audio we previously transcribed. \n\nWhen adding state to a Gradio demo, you need to do a total of 3 things:\n\n* Add a `state` parameter to the function\n* Return the updated `state` at the end of the function\n* Add the `\"state\"` components to the `inputs` and `outputs` in `Interface` \n\nHere's what the code looks like:\n\n```python\ndef transcribe(audio, state=\"\"):\n text = p(audio)[\"text\"]\n state += text + \" \"\n return state, state\n\n# Set the starting state to an empty string\n\ngr.Interface(\n fn=transcribe, \n inputs=[\n gr.Audio(source=\"microphone\", type=\"filepath\", streaming=True), \n \"state\" \n ],\n outputs=[\n \"textbox\",\n \"state\"\n ],\n live=True).launch()\n```\n\nNotice that we've also made one other change, which is that we've set `live=True`. This keeps the Gradio interface running constantly, so it automatically transcribes audio without the user having to repeatedly hit the submit button.\n\nLet's see how it does (try below or [in a new tab](https://huggingface.co/spaces/abidlabs/streaming-asr))!\n\n\n\n\nOne thing that you may notice is that the transcription quality has dropped since the chunks of audio are so small, they lack the context to properly be transcribed. A \"hacky\" fix to this is to simply increase the runtime of the `transcribe()` function so that longer audio chunks are processed. We can do this by adding a `time.sleep()` inside the function, as shown below (we'll see a proper fix next) \n\n```python\nfrom transformers import pipeline\nimport gradio as gr\nimport time\n\np = pipeline(\"automatic-speech-recognition\")\n\ndef transcribe(audio, state=\"\"):\n time.sleep(2)\n text = p(audio)[\"text\"]\n state += text + \" \"\n return state, state\n\ngr.Interface(\n fn=transcribe, \n inputs=[\n gr.Audio(source=\"microphone\", type=\"filepath\", streaming=True), \n \"state\"\n ],\n outputs=[\n \"textbox\",\n \"state\"\n ],\n live=True).launch()\n```\n\nTry the demo below to see the difference (or [open in a new tab](https://huggingface.co/spaces/abidlabs/streaming-asr-paused))!\n\n\n\n\n## 4. 
Create a Streaming ASR Demo with DeepSpeech\n\nYou're not restricted to ASR models from the `transformers` library -- you can use your own models or models from other libraries. The `DeepSpeech` library contains models that are specifically designed to handle streaming audio data. These models perform really well with streaming data as they are able to account for previous chunks of audio data when making predictions.\n\nGoing through the DeepSpeech library is beyond the scope of this Guide (check out their [excellent documentation here](https://deepspeech.readthedocs.io/en/r0.9/)), but you can use Gradio very similarly with a DeepSpeech ASR model as with a Transformers ASR model. \n\nHere's a complete example (on Linux):\n\nFirst install the DeepSpeech library and download the pretrained models from the terminal:\n\n```bash\nwget https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-models.pbmm\nwget https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-models.scorer\napt install libasound2-dev portaudio19-dev libportaudio2 libportaudiocpp0 ffmpeg\npip install deepspeech==0.8.2\n```\n\nThen, create a similar `transcribe()` function as before:\n\n```python\nfrom deepspeech import Model\nimport numpy as np\n\nmodel_file_path = \"deepspeech-0.8.2-models.pbmm\"\nlm_file_path = \"deepspeech-0.8.2-models.scorer\"\nbeam_width = 100\nlm_alpha = 0.93\nlm_beta = 1.18\n\nmodel = Model(model_file_path)\nmodel.enableExternalScorer(lm_file_path)\nmodel.setScorerAlphaBeta(lm_alpha, lm_beta)\nmodel.setBeamWidth(beam_width)\n\n\ndef reformat_freq(sr, y):\n if sr not in (\n 48000,\n 16000,\n ): # Deepspeech only supports 16k, (we convert 48k -> 16k)\n raise ValueError(\"Unsupported rate\", sr)\n if sr == 48000:\n y = (\n ((y / max(np.max(y), 1)) * 32767)\n .reshape((-1, 3))\n .mean(axis=1)\n .astype(\"int16\")\n )\n sr = 16000\n return sr, y\n\n\ndef transcribe(speech, stream):\n _, y = reformat_freq(*speech)\n if stream is None:\n stream = model.createStream()\n stream.feedAudioContent(y)\n text = stream.intermediateDecode()\n return text, stream\n\n```\n\nThen, create a Gradio Interface as before (the only difference being that the return type should be `numpy` instead of a `filepath` to be compatible with the DeepSpeech models)\n\n```python\nimport gradio as gr\n\ngr.Interface(\n fn=transcribe, \n inputs=[\n gr.Audio(source=\"microphone\", type=\"numpy\"), \n \"state\" \n ], \n outputs= [\n \"text\", \n \"state\"\n ], \n live=True).launch()\n```\n\nRunning all of this should allow you to deploy your realtime ASR model with a nice GUI. Try it out and see how well it works for you.\n\n--------------------------------------------\n\n\nAnd you're done! That's all the code you need to build a web-based GUI for your ASR model. \n\nFun tip: you can share your ASR model instantly with others simply by setting `share=True` in `launch()`. \n\n\n", - "html": "

[Real Time Speech Recognition guide rendered as HTML; duplicates the markdown "content" field above]
\n", - "tags": ["ASR", "SPEECH", "STREAMING"], - "spaces": [ - "https://huggingface.co/spaces/abidlabs/streaming-asr-paused", - "https://huggingface.co/spaces/abidlabs/full-context-asr" - ], - "url": "/guides/real-time-speech-recognition/", - "contributor": null - } - ], - "preprocessing": "passes textarea value as a str into the function.", - "postprocessing": "expects a str returned from function and sets textarea value to it.", - "examples-format": "a str representing the textbox input.", - "parent": "gradio", - "prev_obj": "State", - "next_obj": "Timeseries" - }, - "timeseries": { - "class": null, - "name": "Timeseries", - "description": "Creates a component that can be used to upload/preview timeseries csv files or display a dataframe consisting of a time series graphically.", - "tags": { - "preprocessing": "passes the uploaded timeseries data as a {pandas.DataFrame} into the function", - "postprocessing": "expects a {pandas.DataFrame} or {str} path to a csv to be returned, which is then displayed as a timeseries graph", - "examples-format": "a {str} filepath of csv data with time series data.", - "demos": "fraud_detector" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "value", - "annotation": "str | Callable | None", - "doc": "File path for the timeseries csv file. If callable, the function will be called whenever the app loads to set the initial value of the component.", - "default": "None" - }, - { - "name": "x", - "annotation": "str | None", - "doc": "Column name of x (time) series. None if csv has no headers, in which case first column is x series.", - "default": "None" - }, - { - "name": "y", - "annotation": "str | list[str] | None", - "doc": "Column name of y series, or list of column names if multiple series. None if csv has no headers, in which case every column after first is a y series.", - "default": "None" - }, - { - "name": "colors", - "annotation": "list[str] | None", - "doc": "an ordered list of colors to use for each line plot", - "default": "None" - }, - { - "name": "label", - "annotation": "str | None", - "doc": "component name in interface.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", - "default": "None" - }, - { - "name": "show_label", - "annotation": "bool | None", - "doc": "if True, will display label.", - "default": "None" - }, - { - "name": "container", - "annotation": "bool", - "doc": "If True, will place the component in a container - providing some extra padding around the border.", - "default": "True" - }, - { - "name": "scale", - "annotation": "int | None", - "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", - "default": "None" - }, - { - "name": "min_width", - "annotation": "int", - "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. 
If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", - "default": "160" - }, - { - "name": "interactive", - "annotation": "bool | None", - "doc": "if True, will allow users to upload a timeseries csv; if False, can only be used to display timeseries data. If not provided, this is inferred based on whether the component is used as an input or output.", - "default": "None" - }, - { - "name": "visible", - "annotation": "bool", - "doc": "If False, component will be hidden.", - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "elem_classes", - "annotation": "list[str] | str | None", - "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [ - { - "fn": null, - "name": "change", - "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. 
If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Timeseries" - } - ], - "string_shortcuts": [ - ["Timeseries", "timeseries", "Uses default values"] - ], - "demos": [ - [ - "fraud_detector", - "import random\nimport os\nimport gradio as gr\n\n\ndef fraud_detector(card_activity, categories, sensitivity):\n activity_range = random.randint(0, 100)\n drop_columns = [\n column for column in [\"retail\", \"food\", \"other\"] if column not in categories\n ]\n if len(drop_columns):\n card_activity.drop(columns=drop_columns, inplace=True)\n return (\n card_activity,\n card_activity,\n {\"fraud\": activity_range / 100.0, \"not fraud\": 1 - activity_range / 100.0},\n )\n\n\ndemo = gr.Interface(\n fraud_detector,\n [\n gr.Timeseries(x=\"time\", y=[\"retail\", \"food\", \"other\"]),\n gr.CheckboxGroup(\n [\"retail\", \"food\", \"other\"], value=[\"retail\", \"food\", \"other\"]\n ),\n gr.Slider(1, 3),\n ],\n [\n \"dataframe\",\n gr.Timeseries(x=\"time\", y=[\"retail\", \"food\", \"other\"]),\n gr.Label(label=\"Fraud Level\"),\n ],\n examples=[\n [os.path.join(os.path.dirname(__file__), \"fraud.csv\"), [\"retail\", \"food\", \"other\"], 1.0],\n ],\n)\nif __name__ == \"__main__\":\n demo.launch()\n" - ] - ], - "preprocessing": "passes the uploaded timeseries data as a pandas.DataFrame into the function", - "postprocessing": "expects a pandas.DataFrame or str path to a csv to be returned, which is then displayed as a timeseries graph", - "examples-format": "a str filepath of csv data with time series data.", - "parent": "gradio", - "prev_obj": "Textbox", - "next_obj": "UploadButton" - }, - "uploadbutton": { - "class": null, - "name": "UploadButton", - "description": "Used to create an upload button, when cicked allows a user to upload files that satisfy the specified file type or generic files (if file_type not set).", - "tags": { - "preprocessing": "passes the uploaded file as a {file-object} or {List[file-object]} depending on `file_count` (or a {bytes}/{List{bytes}} depending on `type`)", - "postprocessing": "expects function to return a {str} path to a file, or {List[str]} consisting of paths to files.", - "examples-format": "a {str} path to a local file that populates the component.", - "demos": "upload_button" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "label", - "annotation": "str", - "doc": "Text to display on the button. Defaults to \"Upload a File\".", - "default": "\"Upload a File\"" - }, - { - "name": "value", - "annotation": "str | list[str] | Callable | None", - "doc": "File or list of files to upload by default.", - "default": "None" - }, - { - "name": "variant", - "annotation": "Literal['primary', 'secondary', 'stop']", - "doc": "'primary' for main call-to-action, 'secondary' for a more subdued style, 'stop' for a stop button.", - "default": "\"secondary\"" - }, - { - "name": "visible", - "annotation": "bool", - "doc": "If False, component will be hidden.", - "default": "True" - }, - { - "name": "size", - "annotation": "Literal['sm', 'lg'] | None", - "doc": "Size of the button. Can be \"sm\" or \"lg\".", - "default": "None" - }, - { - "name": "scale", - "annotation": "int | None", - "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. 
Should be an integer.", - "default": "None" - }, - { - "name": "min_width", - "annotation": "int | None", - "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", - "default": "None" - }, - { - "name": "interactive", - "annotation": "bool", - "doc": "If False, the UploadButton will be in a disabled state.", - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "elem_classes", - "annotation": "list[str] | str | None", - "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "type", - "annotation": "Literal['file', 'bytes']", - "doc": "Type of value to be returned by component. \"file\" returns a temporary file object with the same base name as the uploaded file, whose full path can be retrieved by file_obj.name, \"binary\" returns an bytes object.", - "default": "\"file\"" - }, - { - "name": "file_count", - "annotation": "Literal['single', 'multiple', 'directory']", - "doc": "if single, allows user to upload one file. If \"multiple\", user uploads multiple files. If \"directory\", user uploads all files in selected directory. Return type will be list for each file in case of \"multiple\" or \"directory\".", - "default": "\"single\"" - }, - { - "name": "file_types", - "annotation": "list[str] | None", - "doc": "List of type of files to be uploaded. \"file\" allows any file to be uploaded, \"image\" allows only image files to be uploaded, \"audio\" allows only audio files to be uploaded, \"video\" allows only video files to be uploaded, \"text\" allows only text files to be uploaded.", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [ - { - "fn": null, - "name": "click", - "description": "This listener is triggered when the component (e.g. a button) is clicked. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.UploadButton" - }, - { - "fn": null, - "name": "upload", - "description": "This listener is triggered when the user uploads a file into the component (e.g. when the user uploads a video into a video component). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." 
- }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.UploadButton" - } - ], - "string_shortcuts": [ - ["UploadButton", "uploadbutton", "Uses default values"] - ], - "demos": [ - [ - "upload_button", - "import gradio as gr\n\ndef upload_file(files):\n file_paths = [file.name for file in files]\n return file_paths\n\nwith gr.Blocks() as demo:\n file_output = gr.File()\n upload_button = gr.UploadButton(\"Click to Upload a File\", file_types=[\"image\", \"video\"], file_count=\"multiple\")\n upload_button.upload(upload_file, upload_button, file_output)\n\ndemo.launch()\n" - ] - ], - "preprocessing": "passes the uploaded file as a file-object or List[file-object] depending on `file_count` (or a bytes/Listbytes depending on `type`)", - "postprocessing": "expects function to return a str path to a file, or List[str] consisting of paths to files.", - "examples-format": "a str path to a local file that populates the component.", - "parent": "gradio", - "prev_obj": "Timeseries", - "next_obj": "Video" - }, - "video": { - "class": null, - "name": "Video", - "description": "Creates a video component that can be used to upload/record videos (as an input) or display videos (as an output). For the video to be playable in the browser it must have a compatible container and codec combination. Allowed combinations are .mp4 with h264 codec, .ogg with theora codec, and .webm with vp9 codec. If the component detects that the output video would not be playable in the browser it will attempt to convert it to a playable mp4 video. If the conversion fails, the original video is returned.", - "tags": { - "preprocessing": "passes the uploaded video as a {str} filepath or URL whose extension can be modified by `format`.", - "postprocessing": "expects a {str} or {pathlib.Path} filepath to a video which is displayed, or a {Tuple[str | pathlib.Path, str | pathlib.Path | None]} where the first element is a filepath to a video and the second element is an optional filepath to a subtitle file.", - "examples-format": "a {str} filepath to a local file that contains the video, or a {Tuple[str, str]} where the first element is a filepath to a video file and the second element is a filepath to a subtitle file.", - "demos": "video_identity, video_subtitle" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "value", - "annotation": "str | Path | tuple[str | Path, str | Path | None] | Callable | None", - "doc": "A path or URL for the default value that Video component is going to take. Can also be a tuple consisting of (video filepath, subtitle filepath). If a subtitle file is provided, it should be of type .srt or .vtt. Or can be callable, in which case the function will be called whenever the app loads to set the initial value of the component.", - "default": "None" - }, - { - "name": "format", - "annotation": "str | None", - "doc": "Format of video format to be returned by component, such as 'avi' or 'mp4'. Use 'mp4' to ensure browser playability. If set to None, video will keep uploaded format.", - "default": "None" - }, - { - "name": "source", - "annotation": "Literal['upload', 'webcam']", - "doc": "Source of video. 
\"upload\" creates a box where user can drop an video file, \"webcam\" allows user to record a video from their webcam.", - "default": "\"upload\"" - }, - { - "name": "height", - "annotation": "int | None", - "doc": "Height of the displayed video in pixels.", - "default": "None" - }, - { - "name": "width", - "annotation": "int | None", - "doc": "Width of the displayed video in pixels.", - "default": "None" - }, - { - "name": "label", - "annotation": "str | None", - "doc": "component name in interface.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", - "default": "None" - }, - { - "name": "show_label", - "annotation": "bool | None", - "doc": "if True, will display label.", - "default": "None" - }, - { - "name": "container", - "annotation": "bool", - "doc": "If True, will place the component in a container - providing some extra padding around the border.", - "default": "True" - }, - { - "name": "scale", - "annotation": "int | None", - "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", - "default": "None" - }, - { - "name": "min_width", - "annotation": "int", - "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", - "default": "160" - }, - { - "name": "interactive", - "annotation": "bool | None", - "doc": "if True, will allow users to upload a video; if False, can only be used to display videos. If not provided, this is inferred based on whether the component is used as an input or output.", - "default": "None" - }, - { - "name": "visible", - "annotation": "bool", - "doc": "If False, component will be hidden.", - "default": "True" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "elem_classes", - "annotation": "list[str] | str | None", - "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", - "default": "None" - }, - { - "name": "mirror_webcam", - "annotation": "bool", - "doc": "If True webcam will be mirrored. Default is True.", - "default": "True" - }, - { - "name": "include_audio", - "annotation": "bool | None", - "doc": "Whether the component should record/retain the audio track for a video. By default, audio is excluded for webcam videos and included for uploaded videos.", - "default": "None" - }, - { - "name": "autoplay", - "annotation": "bool", - "doc": "Whether to automatically play the video when the component is used as an output. Note: browsers will not autoplay video files if the user has not interacted with the page yet.", - "default": "False" - }, - { - "name": "show_share_button", - "annotation": "bool | None", - "doc": "If True, will show a share icon in the corner of the component that allows user to share outputs to Hugging Face Spaces Discussions. If False, icon does not appear. 
If set to None (default behavior), then the icon appears if this Gradio app is launched on Spaces, but not otherwise.", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [ - { - "fn": null, - "name": "change", - "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Video" - }, - { - "fn": null, - "name": "clear", - "description": "This listener is triggered when the user clears the component (e.g. image or audio) using the X button for the component. This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. 
If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Video" - }, - { - "fn": null, - "name": "play", - "description": "This listener is triggered when the user plays the component (e.g. audio or video). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Video" - }, - { - "fn": null, - "name": "pause", - "description": "This listener is triggered when the media stops playing for any reason (e.g. audio or video). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. 
If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Video" - }, - { - "fn": null, - "name": "stop", - "description": "This listener is triggered when the user reaches the end of the media track (e.g. audio or video). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Video" - }, - { - "fn": null, - "name": "end", - "description": "This listener is triggered when the user reaches the end of the media track (e.g. audio or video). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. 
If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Video" - }, - { - "fn": null, - "name": "start_recording", - "description": "This listener is triggered when the user starts recording with the component (e.g. audio or video). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Video" - }, - { - "fn": null, - "name": "stop_recording", - "description": "This listener is triggered when the user stops recording with the component (e.g. audio or video). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. 
If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Video" - }, - { - "fn": null, - "name": "upload", - "description": "This listener is triggered when the user uploads a file into the component (e.g. when the user uploads a video into a video component). This method can be used when this component is in a Gradio Blocks.", - "tags": {}, - "parameters": [ - { - "name": "fn", - "annotation": "Callable | None", - "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component." - }, - { - "name": "inputs", - "annotation": "Component | Sequence[Component] | set[Component] | None", - "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", - "default": "None" - }, - { - "name": "outputs", - "annotation": "Component | Sequence[Component] | None", - "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", - "default": "None" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "None" - }, - { - "name": "status_tracker", - "annotation": "None", - "doc": null, - "default": "None" - }, - { - "name": "scroll_to_output", - "annotation": "bool", - "doc": "If True, will scroll to output component on completion", - "default": "False" - }, - { - "name": "show_progress", - "annotation": "Literal['full', 'minimal', 'hidden']", - "doc": "If True, will show progress animation while pending", - "default": "\"full\"" - }, - { - "name": "queue", - "annotation": "bool | None", - "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", - "default": "None" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", - "default": "False" - }, - { - "name": "max_batch_size", - "annotation": "int", - "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", - "default": "4" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", - "default": "True" - }, - { - "name": "cancels", - "annotation": "dict[str, Any] | list[dict[str, Any]] | None", - "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", - "default": "None" - }, - { - "name": "every", - "annotation": "float | None", - "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", - "default": "None" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Video" - } - ], - "string_shortcuts": [ - ["Video", "video", "Uses default values"], - ["PlayableVideo", "playablevideo", "Uses format=\"mp4\""] - ], - "demos": [ - [ - "video_identity", - "import gradio as gr\nimport os\n\n\ndef video_identity(video):\n return video\n\n\ndemo = gr.Interface(video_identity, \n gr.Video(), \n \"playable_video\", \n examples=[\n os.path.join(os.path.dirname(__file__), \n \"video/video_sample.mp4\")], \n cache_examples=True)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ], - [ - "video_subtitle", - "import gradio as gr\nimport os\n\na = os.path.join(os.path.dirname(__file__), \"files/a.mp4\") # Video\nb = os.path.join(os.path.dirname(__file__), \"files/b.mp4\") # Video\ns1 = os.path.join(os.path.dirname(__file__), \"files/s1.srt\") # Subtitle\ns2 = os.path.join(os.path.dirname(__file__), \"files/s2.vtt\") # Subtitle\n\n\ndef video_demo(video, subtitle=None):\n if subtitle is None:\n return video\n\n return [video, subtitle.name]\n\n\ndemo = gr.Interface(\n fn=video_demo,\n inputs=[\n gr.Video(type=\"file\", label=\"In\", interactive=True),\n gr.File(label=\"Subtitle\", file_types=[\".srt\", \".vtt\"]),\n ],\n outputs=gr.Video(label=\"Out\"),\n examples=[\n [a, s1],\n [b, s2],\n [a, None],\n ],\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ] - ], - "preprocessing": "passes the uploaded video as a str filepath or URL whose extension can be modified by `format`.", - "postprocessing": "expects a str or pathlib.Path filepath to a video which is displayed, or a Tuple[str | pathlib.Path, str | pathlib.Path | None] where the first element is a filepath to a video and the second element is an optional filepath to a subtitle file.", - "examples-format": "a str filepath to a local file that contains the video, or a Tuple[str, str] where the first element is a filepath to a video file and the second element is a filepath to a subtitle file.", - "parent": "gradio", - "prev_obj": "UploadButton", - "next_obj": "Examples" - } - }, - "helpers": { - "error": { - "class": null, - "name": "Error", - "description": "This class allows you to pass custom error messages to the user. You can do so by raising a gr.Error(\"custom message\") anywhere in the code, and when that line is executed the custom message will appear in a modal on the demo.
", - "tags": { "demos": "calculator" }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "message", - "annotation": "", - "doc": "The error message to be displayed to the user.", - "default": "\"Error raised.\"" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [], - "demos": [ - [ - "calculator", - "import gradio as gr\n\ndef calculator(num1, operation, num2):\n if operation == \"add\":\n return num1 + num2\n elif operation == \"subtract\":\n return num1 - num2\n elif operation == \"multiply\":\n return num1 * num2\n elif operation == \"divide\":\n if num2 == 0:\n raise gr.Error(\"Cannot divide by zero!\")\n return num1 / num2\n\ndemo = gr.Interface(\n calculator,\n [\n \"number\", \n gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n \"number\"\n ],\n \"number\",\n examples=[\n [5, \"add\", 3],\n [4, \"divide\", 2],\n [-4, \"multiply\", 2.5],\n [0, \"subtract\", 1.2],\n ],\n title=\"Toy Calculator\",\n description=\"Here's a sample toy calculator. Allows you to calculate things like $2+2=4$\",\n)\nif __name__ == \"__main__\":\n demo.launch()\n" - ] - ], - "parent": "gradio", - "prev_obj": "Video", - "next_obj": "load" - }, - "load": { - "class": null, - "name": "load", - "description": "Method that constructs a Blocks from a Hugging Face repo. Can accept model repos (if src is \"models\") or Space repos (if src is \"spaces\"). The input and output components are automatically loaded from the repo.", - "tags": { - "parameters": "name: the name of the model (e.g. \"gpt2\" or \"facebook/bart-base\") or space (e.g. \"flax-community/spanish-gpt2\"), can include the `src` as prefix (e.g. \"models/facebook/bart-base\")
src: the source of the model: `models` or `spaces` (or leave empty if source is provided as a prefix in `name`)
api_key: Deprecated. Please use the `hf_token` parameter instead.
hf_token: optional access token for loading private Hugging Face Hub models or spaces. Find your token here: https://huggingface.co/settings/tokens. Warning: only provide this if you are loading a trusted private Space as it can be read by the Space you are loading.
alias: optional string used as the name of the loaded model instead of the default name (only applies if loading a Space running Gradio 2.x)", - "returns": "a Gradio Blocks object for the given model" - }, - "parameters": [ - { - "name": "name", - "annotation": "str", - "doc": "the name of the model (e.g. \"gpt2\" or \"facebook/bart-base\") or space (e.g. \"flax-community/spanish-gpt2\"), can include the `src` as prefix (e.g. \"models/facebook/bart-base\")" - }, - { - "name": "src", - "annotation": "str | None", - "doc": "the source of the model: `models` or `spaces` (or leave empty if source is provided as a prefix in `name`)", - "default": "None" - }, - { - "name": "api_key", - "annotation": "str | None", - "doc": "Deprecated. Please use the `hf_token` parameter instead.", - "default": "None" - }, - { - "name": "hf_token", - "annotation": "str | None", - "doc": "optional access token for loading private Hugging Face Hub models or spaces. Find your token here: https://huggingface.co/settings/tokens. Warning: only provide this if you are loading a trusted private Space as it can be read by the Space you are loading.", - "default": "None" - }, - { - "name": "alias", - "annotation": "str | None", - "doc": "optional string used as the name of the loaded model instead of the default name (only applies if loading a Space running Gradio 2.x)", - "default": "None" - } - ], - "returns": { - "annotation": null, - "doc": "a Gradio Blocks object for the given model" - }, - "example": "import gradio as gr\ndemo = gr.load(\"gradio/question-answering\", src=\"spaces\")\ndemo.launch()", - "fns": [], - "parent": "gradio", - "prev_obj": "Error", - "next_obj": "Examples" - }, - "examples": { - "class": null, - "name": "Examples", - "description": "This class is a wrapper over the Dataset component and can be used to create Examples for Blocks / Interfaces. Populates the Dataset component with examples and assigns event listener so that clicking on an example populates the input/output components. Optionally handles example caching for fast inference.
", - "tags": { - "demos": "blocks_inputs, fake_gan", - "guides": "more-on-examples-and-flagging, using-hugging-face-integrations, image-classification-in-pytorch, image-classification-in-tensorflow, image-classification-with-vision-transformers, create-your-own-friends-with-a-gan" - }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "examples", - "annotation": "list[Any] | list[list[Any]] | str", - "doc": "example inputs that can be clicked to populate specific components. Should be nested list, in which the outer list consists of samples and each inner list consists of an input corresponding to each input component. A string path to a directory of examples can also be provided but it should be within the directory with the python file running the gradio app. If there are multiple input components and a directory is provided, a log.csv file must be present in the directory to link corresponding inputs." - }, - { - "name": "inputs", - "annotation": "IOComponent | list[IOComponent]", - "doc": "the component or list of components corresponding to the examples" - }, - { - "name": "outputs", - "annotation": "IOComponent | list[IOComponent] | None", - "doc": "optionally, provide the component or list of components corresponding to the output of the examples. Required if `cache` is True.", - "default": "None" - }, - { - "name": "fn", - "annotation": "Callable | None", - "doc": "optionally, provide the function to run to generate the outputs corresponding to the examples. Required if `cache` is True.", - "default": "None" - }, - { - "name": "cache_examples", - "annotation": "bool", - "doc": "if True, caches examples for fast runtime. If True, then `fn` and `outputs` must be provided. If `fn` is a generator function, then the last yielded value will be used as the output.", - "default": "False" - }, - { - "name": "examples_per_page", - "annotation": "int", - "doc": "how many examples to show per page.", - "default": "10" - }, - { - "name": "label", - "annotation": "str | None", - "doc": "the label to use for the examples component (by default, \"Examples\")", - "default": "\"Examples\"" - }, - { - "name": "elem_id", - "annotation": "str | None", - "doc": "an optional string that is assigned as the id of this component in the HTML DOM.", - "default": "None" - }, - { - "name": "run_on_click", - "annotation": "bool", - "doc": "if cache_examples is False, clicking on an example does not run the function when an example is clicked. Set this to True to run the function when an example is clicked. Has no effect if cache_examples is True.", - "default": "False" - }, - { - "name": "preprocess", - "annotation": "bool", - "doc": "if True, preprocesses the example input before running the prediction function and caching the output. Only applies if cache_examples is True.", - "default": "True" - }, - { - "name": "postprocess", - "annotation": "bool", - "doc": "if True, postprocesses the example output after running the prediction function and before caching. Only applies if cache_examples is True.", - "default": "True" - }, - { - "name": "api_name", - "annotation": "str | None | Literal[False]", - "doc": "Defines how the event associated with clicking on the examples appears in the API docs. Can be a string, None, or False. If False (default), the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", - "default": "False" - }, - { - "name": "batch", - "annotation": "bool", - "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. Used only if cache_examples is True.", - "default": "False" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [], - "demos": [ - [ - "blocks_inputs", - "import gradio as gr\nimport os\n\n\ndef combine(a, b):\n return a + \" \" + b\n\n\ndef mirror(x):\n return x\n\n\nwith gr.Blocks() as demo:\n\n txt = gr.Textbox(label=\"Input\", lines=2)\n txt_2 = gr.Textbox(label=\"Input 2\")\n txt_3 = gr.Textbox(value=\"\", label=\"Output\")\n btn = gr.Button(value=\"Submit\")\n btn.click(combine, inputs=[txt, txt_2], outputs=[txt_3])\n\n with gr.Row():\n im = gr.Image()\n im_2 = gr.Image()\n\n btn = gr.Button(value=\"Mirror Image\")\n btn.click(mirror, inputs=[im], outputs=[im_2])\n\n gr.Markdown(\"## Text Examples\")\n gr.Examples(\n [[\"hi\", \"Adam\"], [\"hello\", \"Eve\"]],\n [txt, txt_2],\n txt_3,\n combine,\n cache_examples=True,\n )\n gr.Markdown(\"## Image Examples\")\n gr.Examples(\n examples=[os.path.join(os.path.dirname(__file__), \"lion.jpg\")],\n inputs=im,\n outputs=im_2,\n fn=mirror,\n cache_examples=True,\n )\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ], - [ - "fake_gan", - "# This demo needs to be run from the repo folder.\n# python demo/fake_gan/run.py\nimport random\n\nimport gradio as gr\n\n\ndef fake_gan():\n images = [\n (random.choice(\n [\n \"https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n \"https://images.unsplash.com/photo-1554151228-14d9def656e4?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=386&q=80\",\n \"https://images.unsplash.com/photo-1542909168-82c3e7fdca5c?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxzZWFyY2h8MXx8aHVtYW4lMjBmYWNlfGVufDB8fDB8fA%3D%3D&w=1000&q=80\",\n \"https://images.unsplash.com/photo-1546456073-92b9f0a8d413?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n \"https://images.unsplash.com/photo-1601412436009-d964bd02edbc?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=464&q=80\",\n ]\n ), f\"label {i}\" if i != 0 else \"label\" * 50)\n for i in range(3)\n ]\n return images\n\n\nwith gr.Blocks() as demo:\n with gr.Column(variant=\"panel\"):\n with gr.Row(variant=\"compact\"):\n text = gr.Textbox(\n label=\"Enter your prompt\",\n show_label=False,\n max_lines=1,\n placeholder=\"Enter your prompt\",\n ).style(\n container=False,\n )\n btn = gr.Button(\"Generate image\").style(full_width=False)\n\n gallery = gr.Gallery(\n label=\"Generated images\", show_label=False, elem_id=\"gallery\"\n ).style(columns=[2], rows=[2], object_fit=\"contain\", height=\"auto\")\n\n btn.click(fake_gan, None, gallery)\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ] - ], - "guides": [ - { - "name": "using-hugging-face-integrations", - "category": "integrating-other-frameworks", - "pretty_category": "Integrating Other Frameworks", - "guide_index": 1, - "absolute_index": 16, - "pretty_name": "Using Hugging Face Integrations", - "content": "# Using Hugging Face Integrations\n\n\n\n\n\n\n## Introduction\n\nThe Hugging Face Hub is a central platform that has over 190,000 [models](https://huggingface.co/models), 
32,000 [datasets](https://huggingface.co/datasets) and 40,000 [demos](https://huggingface.co/spaces), also known as Spaces. Although Hugging Face is famous for its \ud83e\udd17 transformers and diffusers libraries, the Hub also supports dozens of ML libraries, such as PyTorch, TensorFlow, spaCy, and many others across a variety of domains, from computer vision to reinforcement learning.\n\nGradio has multiple features that make it extremely easy to leverage existing models and Spaces on the Hub. This guide walks through these features.\n\n## Using regular inference with `pipeline`\n\nFirst, let's build a simple interface that translates text from English to Spanish. Between the over a thousand models shared by the University of Helsinki, there is an [existing model](https://huggingface.co/Helsinki-NLP/opus-mt-en-es), `opus-mt-en-es`, that does precisely this!\n\nThe \ud83e\udd17 transformers library has a very easy-to-use abstraction, [`pipeline()`](https://huggingface.co/docs/transformers/v4.16.2/en/main_classes/pipelines#transformers.pipeline) that handles most of the complex code to offer a simple API for common tasks. By specifying the task and an (optional) model, you can use an existing model with few lines:\n\n```python\nimport gradio as gr\n\nfrom transformers import pipeline\n\npipe = pipeline(\"translation\", model=\"Helsinki-NLP/opus-mt-en-es\")\n\ndef predict(text):\n return pipe(text)[0][\"translation_text\"]\n \ndemo = gr.Interface(\n fn=predict, \n inputs='text',\n outputs='text',\n)\n\ndemo.launch()\n```\n\nBut `gradio` actually makes it even easier to convert a `pipeline` to a demo, simply by using the `gradio.Interface.from_pipeline` methods, which skips the need to specify the input and output components:\n\n```python\nfrom transformers import pipeline\nimport gradio as gr\n\npipe = pipeline(\"translation\", model=\"Helsinki-NLP/opus-mt-en-es\")\n\ndemo = gr.Interface.from_pipeline(pipe)\ndemo.launch()\n```\n\nThe previous code produces the following interface, which you can try right here in your browser: \n\n\n\n\n\n## Using Hugging Face Inference API\n\nHugging Face has a free service called the [Inference API](https://huggingface.co/inference-api), which allows you to send HTTP requests to models in the Hub. For transformers or diffusers-based models, the API can be 2 to 10 times faster than running the inference yourself. The API is free (rate limited), and you can switch to dedicated [Inference Endpoints](https://huggingface.co/pricing) when you want to use it in production.\n\nLet's try the same demo as above but using the Inference API instead of loading the model yourself. Given a Hugging Face model supported in the Inference API, Gradio can automatically infer the expected input and output and make the underlying server calls, so you don't have to worry about defining the prediction function. Here is what the code would look like!\n\n```python\nimport gradio as gr\n\ndemo = gr.load(\"Helsinki-NLP/opus-mt-en-es\", src=\"models\")\n\ndemo.launch()\n```\n\nNotice that we just put specify the model name and state that the `src` should be `models` (Hugging Face's Model Hub). There is no need to install any dependencies (except `gradio`) since you are not loading the model on your computer.\n\nYou might notice that the first inference takes about 20 seconds. This happens since the Inference API is loading the model in the server. 
You get some benefits afterward:\n\n* The inference will be much faster.\n* The server caches your requests.\n* You get built-in automatic scaling.\n\n## Hosting your Gradio demos\n\n[Hugging Face Spaces](https://hf.co/spaces) allows anyone to host their Gradio demos freely, and uploading your Gradio demos take a couple of minutes. You can head to [hf.co/new-space](https://huggingface.co/new-space), select the Gradio SDK, create an `app.py` file, and voila! You have a demo you can share with anyone else. To learn more, read [this guide how to host on Hugging Face Spaces using the website](https://huggingface.co/blog/gradio-spaces).\n\n\nAlternatively, you can create a Space programmatically, making use of the [huggingface_hub client library](https://huggingface.co/docs/huggingface_hub/index) library. Here's an example:\n\n```python\nfrom huggingface_hub import (\n create_repo,\n get_full_repo_name,\n upload_file,\n)\ncreate_repo(name=target_space_name, token=hf_token, repo_type=\"space\", space_sdk=\"gradio\")\nrepo_name = get_full_repo_name(model_id=target_space_name, token=hf_token)\nfile_url = upload_file(\n path_or_fileobj=\"file.txt\",\n path_in_repo=\"app.py\",\n repo_id=repo_name,\n repo_type=\"space\",\n token=hf_token,\n)\n```\nHere, `create_repo` creates a gradio repo with the target name under a specific account using that account's Write Token. `repo_name` gets the full repo name of the related repo. Finally `upload_file` uploads a file inside the repo with the name `app.py`.\n\n\n\n## Embedding your Space demo on other websites\n\nThroughout this guide, you've seen many embedded Gradio demos. You can also do this on own website! The first step is to create a Hugging Face Space with the demo you want to showcase. Then, [follow the steps here to embed the Space on your website](/guides/sharing-your-app/#embedding-hosted-spaces).\n\n\n## Loading demos from Spaces\n\nYou can also use and remix existing Gradio demos on Hugging Face Spaces. For example, you could take two existing Gradio demos and put them as separate tabs and create a new demo. You can run this new demo locally, or upload it to Spaces, allowing endless possibilities to remix and create new demos!\n\nHere's an example that does exactly that:\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n with gr.Tab(\"Translate to Spanish\"):\n gr.load(\"gradio/helsinki_translation_en_es\", src=\"spaces\")\n with gr.Tab(\"Translate to French\"):\n gr.load(\"abidlabs/en2fr\", src=\"spaces\")\n\ndemo.launch()\n```\n\nNotice that we use `gr.load()`, the same method we used to load models using the Inference API. However, here we specify that the `src` is `spaces` (Hugging Face Spaces).\n\n## Recap\n\nThat's it! Let's recap the various ways Gradio and Hugging Face work together:\n\n1. You can convert a `transformers` pipeline into a Gradio demo using `from_pipeline()`\n2. You can build a demo around the Inference API without having to load the model easily using `gr.load()`\n3. You host your Gradio demo on Hugging Face Spaces, either using the GUI or entirely in Python.\n4. You can embed Gradio demos that are hosted on Hugging Face Spaces onto your own website.\n5. You can load demos from Hugging Face Spaces to remix and create new Gradio demos using `gr.load()`.\n\n\n\ud83e\udd17\n", - "html": "

Using Hugging Face Integrations

\n\n

Introduction

\n\n

The Hugging Face Hub is a central platform that has over 190,000 models, 32,000 datasets and 40,000 demos, also known as Spaces. Although Hugging Face is famous for its \ud83e\udd17 transformers and diffusers libraries, the Hub also supports dozens of ML libraries, such as PyTorch, TensorFlow, spaCy, and many others across a variety of domains, from computer vision to reinforcement learning.

\n\n

Gradio has multiple features that make it extremely easy to leverage existing models and Spaces on the Hub. This guide walks through these features.

\n\n

Using regular inference with pipeline

\n\n

First, let's build a simple interface that translates text from English to Spanish. Among the over a thousand models shared by the University of Helsinki, there is an existing model, opus-mt-en-es, that does precisely this!

\n\n

The \ud83e\udd17 transformers library has a very easy-to-use abstraction, pipeline(), that handles most of the complex code to offer a simple API for common tasks. By specifying the task and an (optional) model, you can use an existing model with a few lines:

\n\n
import gradio as gr\n\nfrom transformers import pipeline\n\npipe = pipeline(\"translation\", model=\"Helsinki-NLP/opus-mt-en-es\")\n\ndef predict(text):\n  return pipe(text)[0][\"translation_text\"]\n\ndemo = gr.Interface(\n  fn=predict, \n  inputs='text',\n  outputs='text',\n)\n\ndemo.launch()\n
\n\n

But gradio actually makes it even easier to convert a pipeline to a demo, simply by using the gradio.Interface.from_pipeline method, which skips the need to specify the input and output components:

\n\n
from transformers import pipeline\nimport gradio as gr\n\npipe = pipeline(\"translation\", model=\"Helsinki-NLP/opus-mt-en-es\")\n\ndemo = gr.Interface.from_pipeline(pipe)\ndemo.launch()\n
\n\n

The previous code produces the following interface, which you can try right here in your browser:

\n\n

\n\n

Using Hugging Face Inference API

\n\n

Hugging Face has a free service called the Inference API, which allows you to send HTTP requests to models in the Hub. For transformers or diffusers-based models, the API can be 2 to 10 times faster than running the inference yourself. The API is free (rate limited), and you can switch to dedicated Inference Endpoints when you want to use it in production.

\n\n

Let's try the same demo as above but using the Inference API instead of loading the model yourself. Given a Hugging Face model supported in the Inference API, Gradio can automatically infer the expected input and output and make the underlying server calls, so you don't have to worry about defining the prediction function. Here is what the code would look like!

\n\n
import gradio as gr\n\ndemo = gr.load(\"Helsinki-NLP/opus-mt-en-es\", src=\"models\")\n\ndemo.launch()\n
\n\n

Notice that we just specify the model name and state that the src should be models (Hugging Face's Model Hub). There is no need to install any dependencies (except gradio) since you are not loading the model on your computer.

\n\n
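
gr.load() also accepts an hf_token argument for loading private Hub models or Spaces, so the same one-liner works for private repos. Below is a minimal sketch of this; it is not part of the original guide, and the repo name and token are placeholders:

\n\n
import gradio as gr\n\n# Placeholder repo name and token -- hf_token is only needed for private repos\n# (tokens can be found at https://huggingface.co/settings/tokens)\ndemo = gr.load(\"models/your-username/your-private-model\", hf_token=\"hf_...\")\n\ndemo.launch()\n
\n\n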

You might notice that the first inference takes about 20 seconds. This happens because the Inference API is loading the model on the server. You get some benefits afterward:

\n\n
    \n
  • The inference will be much faster.
  • The server caches your requests.
  • You get built-in automatic scaling.
\n\n

Hosting your Gradio demos

\n\n

Hugging Face Spaces allows anyone to host their Gradio demos freely, and uploading your Gradio demos takes a couple of minutes. You can head to hf.co/new-space, select the Gradio SDK, create an app.py file, and voila! You have a demo you can share with anyone else. To learn more, read this guide on how to host on Hugging Face Spaces using the website.

\n\n

Alternatively, you can create a Space programmatically, making use of the huggingface_hub client library. Here's an example:

\n\n
from huggingface_hub import (\n    create_repo,\n    get_full_repo_name,\n    upload_file,\n)\ncreate_repo(name=target_space_name, token=hf_token, repo_type=\"space\", space_sdk=\"gradio\")\nrepo_name = get_full_repo_name(model_id=target_space_name, token=hf_token)\nfile_url = upload_file(\n    path_or_fileobj=\"file.txt\",\n    path_in_repo=\"app.py\",\n    repo_id=repo_name,\n    repo_type=\"space\",\n    token=hf_token,\n)\n
\n\n

Here, create_repo creates a Gradio repo with the target name under a specific account using that account's Write Token. repo_name gets the full repo name of the related repo. Finally, upload_file uploads a file inside the repo with the name app.py.

\n\n

Embedding your Space demo on other websites

\n\n

Throughout this guide, you've seen many embedded Gradio demos. You can also do this on your own website! The first step is to create a Hugging Face Space with the demo you want to showcase. Then, follow the steps here to embed the Space on your website.

\n\n

Loading demos from Spaces

\n\n

You can also use and remix existing Gradio demos on Hugging Face Spaces. For example, you could take two existing Gradio demos and put them in separate tabs to create a new demo. You can run this new demo locally or upload it to Spaces, allowing endless possibilities to remix and create new demos!

\n\n

Here's an example that does exactly that:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n  with gr.Tab(\"Translate to Spanish\"):\n    gr.load(\"gradio/helsinki_translation_en_es\", src=\"spaces\")\n  with gr.Tab(\"Translate to French\"):\n    gr.load(\"abidlabs/en2fr\", src=\"spaces\")\n\ndemo.launch()\n
\n\n

Notice that we use gr.load(), the same method we used to load models using the Inference API. However, here we specify that the src is spaces (Hugging Face Spaces).

\n\n

Recap

\n\n

That's it! Let's recap the various ways Gradio and Hugging Face work together:

\n\n
    \n
  1. You can convert a transformers pipeline into a Gradio demo using from_pipeline().
  2. You can easily build a demo around the Inference API, without having to load the model yourself, using gr.load().
  3. You can host your Gradio demo on Hugging Face Spaces, either using the GUI or entirely in Python.
  4. You can embed Gradio demos that are hosted on Hugging Face Spaces onto your own website.
  5. You can load demos from Hugging Face Spaces to remix and create new Gradio demos using gr.load().
\n\n

\ud83e\udd17

\n", - "tags": ["HUB", "SPACES", "EMBED"], - "spaces": [ - "https://huggingface.co/spaces/gradio/helsinki_translation_en_es" - ], - "url": "/guides/using-hugging-face-integrations/", - "contributor": "Omar Sanseviero \ud83e\udd99 " - }, - { - "name": "image-classification-in-pytorch", - "category": "integrating-other-frameworks", - "pretty_category": "Integrating Other Frameworks", - "guide_index": null, - "absolute_index": 20, - "pretty_name": "Image Classification In Pytorch", - "content": "# Image Classification in PyTorch\n\n\n\n\n## Introduction\n\nImage classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from autonomous vehicles to medical imaging. \n\nSuch models are perfect to use with Gradio's *image* input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in Python, and it will look like this (try one of the examples!):\n\n\n\n\nLet's get started!\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). We will be using a pretrained image classification model, so you should also have `torch` installed.\n\n## Step 1 \u2014 Setting up the Image Classification Model\n\nFirst, we will need an image classification model. For this tutorial, we will use a pretrained Resnet-18 model, as it is easily downloadable from [PyTorch Hub](https://pytorch.org/hub/pytorch_vision_resnet/). You can use a different pretrained model or train your own. \n\n```python\nimport torch\n\nmodel = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()\n```\n\nBecause we will be using the model for inference, we have called the `.eval()` method.\n\n## Step 2 \u2014 Defining a `predict` function\n\nNext, we will need to define a function that takes in the *user input*, which in this case is an image, and returns the prediction. The prediction should be returned as a dictionary whose keys are class name and values are confidence probabilities. We will load the class names from this [text file](https://git.io/JJkYN).\n\nIn the case of our pretrained model, it will look like this:\n\n```python\nimport requests\nfrom PIL import Image\nfrom torchvision import transforms\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\ndef predict(inp):\n inp = transforms.ToTensor()(inp).unsqueeze(0)\n with torch.no_grad():\n prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)\n confidences = {labels[i]: float(prediction[i]) for i in range(1000)} \n return confidences\n```\n\nLet's break this down. The function takes one parameter:\n\n* `inp`: the input image as a `PIL` image\n\nThen, the function converts the image to a PIL Image and then eventually a PyTorch `tensor`, passes it through the model, and returns:\n\n* `confidences`: the predictions, as a dictionary whose keys are class labels and whose values are confidence probabilities\n\n## Step 3 \u2014 Creating a Gradio Interface\n\nNow that we have our predictive function set up, we can create a Gradio Interface around it. \n\nIn this case, the input component is a drag-and-drop image component. To create this input, we use `Image(type=\"pil\")` which creates the component and handles the preprocessing to convert that to a `PIL` image. 
\n\nThe output component will be a `Label`, which displays the top labels in a nice form. Since we don't want to show all 1,000 class labels, we will customize it to show only the top 3 images by constructing it as `Label(num_top_classes=3)`.\n\nFinally, we'll add one more parameter, the `examples`, which allows us to prepopulate our interfaces with a few predefined examples. The code for Gradio looks like this:\n\n```python\nimport gradio as gr\n\ngr.Interface(fn=predict, \n inputs=gr.Image(type=\"pil\"),\n outputs=gr.Label(num_top_classes=3),\n examples=[\"lion.jpg\", \"cheetah.jpg\"]).launch()\n```\n\nThis produces the following interface, which you can try right here in your browser (try uploading your own examples!):\n\n\n\n----------\n\nAnd you're done! That's all the code you need to build a web demo for an image classifier. If you'd like to share with others, try setting `share=True` when you `launch()` the Interface!\n\n", - "html": "

Image Classification in PyTorch

\n\n

Introduction

\n\n

Image classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from autonomous vehicles to medical imaging.

\n\n

Such models are perfect to use with Gradio's image input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in Python, and it will look like this (try one of the examples!):

\n\n\n\n

Let's get started!

\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed. We will be using a pretrained image classification model, so you should also have torch installed.

\n\n

Step 1 \u2014 Setting up the Image Classification Model

\n\n

First, we will need an image classification model. For this tutorial, we will use a pretrained Resnet-18 model, as it is easily downloadable from PyTorch Hub. You can use a different pretrained model or train your own.

\n\n
import torch\n\nmodel = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()\n
\n\n

Because we will be using the model for inference, we have called the .eval() method.

\n\n

Step 2 \u2014 Defining a predict function

\n\n

Next, we will need to define a function that takes in the user input, which in this case is an image, and returns the prediction. The prediction should be returned as a dictionary whose keys are class names and whose values are confidence probabilities. We will load the class names from this text file.

\n\n

In the case of our pretrained model, it will look like this:

\n\n
import requests\nfrom PIL import Image\nfrom torchvision import transforms\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\ndef predict(inp):\n  inp = transforms.ToTensor()(inp).unsqueeze(0)\n  with torch.no_grad():\n    prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)\n    confidences = {labels[i]: float(prediction[i]) for i in range(1000)}    \n  return confidences\n
\n\n

Let's break this down. The function takes one parameter:

\n\n
    \n
  • inp: the input image as a PIL image
\n\n

Then, the function converts the image to a PIL Image and then eventually a PyTorch tensor, passes it through the model, and returns:

\n\n
    \n
  • confidences: the predictions, as a dictionary whose keys are class labels and whose values are confidence probabilities
\n\n

Step 3 \u2014 Creating a Gradio Interface

\n\n

Now that we have our predictive function set up, we can create a Gradio Interface around it.

\n\n

In this case, the input component is a drag-and-drop image component. To create this input, we use Image(type=\"pil\") which creates the component and handles the preprocessing to convert that to a PIL image.

\n\n

The output component will be a Label, which displays the top labels in a nice form. Since we don't want to show all 1,000 class labels, we will customize it to show only the top 3 classes by constructing it as Label(num_top_classes=3).

\n\n

Finally, we'll add one more parameter, the examples, which allows us to prepopulate our interfaces with a few predefined examples. The code for Gradio looks like this:

\n\n
import gradio as gr\n\ngr.Interface(fn=predict, \n             inputs=gr.Image(type=\"pil\"),\n             outputs=gr.Label(num_top_classes=3),\n             examples=[\"lion.jpg\", \"cheetah.jpg\"]).launch()\n
\n\n

This produces the following interface, which you can try right here in your browser (try uploading your own examples!):

\n\n\n\n
\n\n

And you're done! That's all the code you need to build a web demo for an image classifier. If you'd like to share with others, try setting share=True when you launch() the Interface!

\n", - "tags": ["VISION", "RESNET", "PYTORCH"], - "spaces": [ - "https://huggingface.co/spaces/abidlabs/pytorch-image-classifier", - "https://huggingface.co/spaces/pytorch/ResNet", - "https://huggingface.co/spaces/pytorch/ResNext", - "https://huggingface.co/spaces/pytorch/SqueezeNet" - ], - "url": "/guides/image-classification-in-pytorch/", - "contributor": null - }, - { - "name": "image-classification-in-tensorflow", - "category": "integrating-other-frameworks", - "pretty_category": "Integrating Other Frameworks", - "guide_index": null, - "absolute_index": 21, - "pretty_name": "Image Classification In Tensorflow", - "content": "# Image Classification in TensorFlow and Keras\n\n\n\n\n## Introduction\n\nImage classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from traffic control systems to satellite imaging. \n\nSuch models are perfect to use with Gradio's *image* input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in Python, and it will look like this (try one of the examples!):\n\n\n\n\nLet's get started!\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). We will be using a pretrained Keras image classification model, so you should also have `tensorflow` installed.\n\n## Step 1 \u2014 Setting up the Image Classification Model\n\nFirst, we will need an image classification model. For this tutorial, we will use a pretrained Mobile Net model, as it is easily downloadable from [Keras](https://keras.io/api/applications/mobilenet/). You can use a different pretrained model or train your own. \n\n```python\nimport tensorflow as tf\n\ninception_net = tf.keras.applications.MobileNetV2()\n```\n\nThis line automatically downloads the MobileNet model and weights using the Keras library. \n\n## Step 2 \u2014 Defining a `predict` function\n\nNext, we will need to define a function that takes in the *user input*, which in this case is an image, and returns the prediction. The prediction should be returned as a dictionary whose keys are class name and values are confidence probabilities. We will load the class names from this [text file](https://git.io/JJkYN).\n\nIn the case of our pretrained model, it will look like this:\n\n```python\nimport requests\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\ndef classify_image(inp):\n inp = inp.reshape((-1, 224, 224, 3))\n inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)\n prediction = inception_net.predict(inp).flatten()\n confidences = {labels[i]: float(prediction[i]) for i in range(1000)}\n return confidences\n```\n\nLet's break this down. The function takes one parameter:\n\n* `inp`: the input image as a `numpy` array\n\nThen, the function adds a batch dimension, passes it through the model, and returns:\n\n* `confidences`: the predictions, as a dictionary whose keys are class labels and whose values are confidence probabilities\n\n## Step 3 \u2014 Creating a Gradio Interface\n\nNow that we have our predictive function set up, we can create a Gradio Interface around it. \n\nIn this case, the input component is a drag-and-drop image component. 
To create this input, we can use the `\"gradio.inputs.Image\"` class, which creates the component and handles the preprocessing to convert that to a numpy array. We will instantiate the class with a parameter that automatically preprocesses the input image to be 224 pixels by 224 pixels, which is the size that MobileNet expects.\n\nThe output component will be a `\"label\"`, which displays the top labels in a nice form. Since we don't want to show all 1,000 class labels, we will customize it to show only the top 3 images.\n\nFinally, we'll add one more parameter, the `examples`, which allows us to prepopulate our interfaces with a few predefined examples. The code for Gradio looks like this:\n\n```python\nimport gradio as gr\n\ngr.Interface(fn=classify_image, \n inputs=gr.Image(shape=(224, 224)),\n outputs=gr.Label(num_top_classes=3),\n examples=[\"banana.jpg\", \"car.jpg\"]).launch()\n```\n\nThis produces the following interface, which you can try right here in your browser (try uploading your own examples!):\n\n\n\n----------\n\nAnd you're done! That's all the code you need to build a web demo for an image classifier. If you'd like to share with others, try setting `share=True` when you `launch()` the Interface!\n\n", - "html": "

Image Classification in TensorFlow and Keras

\n\n

Introduction

\n\n

Image classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from traffic control systems to satellite imaging.

\n\n

Such models are perfect to use with Gradio's image input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in Python, and it will look like this (try one of the examples!):

\n\n\n\n

Let's get started!

\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed. We will be using a pretrained Keras image classification model, so you should also have tensorflow installed.

\n\n

Step 1 \u2014 Setting up the Image Classification Model

\n\n

First, we will need an image classification model. For this tutorial, we will use a pretrained MobileNet model, as it is easily downloadable from Keras. You can use a different pretrained model or train your own.

\n\n
import tensorflow as tf\n\ninception_net = tf.keras.applications.MobileNetV2()\n
\n\n

This line automatically downloads the MobileNet model and weights using the Keras library.

\n\n

Step 2 \u2014 Defining a predict function

\n\n

Next, we will need to define a function that takes in the user input, which in this case is an image, and returns the prediction. The prediction should be returned as a dictionary whose keys are class names and whose values are confidence probabilities. We will load the class names from this text file.

\n\n

In the case of our pretrained model, it will look like this:

\n\n
import requests\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\ndef classify_image(inp):\n  inp = inp.reshape((-1, 224, 224, 3))\n  inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)\n  prediction = inception_net.predict(inp).flatten()\n  confidences = {labels[i]: float(prediction[i]) for i in range(1000)}\n  return confidences\n
\n\n

Let's break this down. The function takes one parameter:

\n\n
    \n
  • inp: the input image as a numpy array
\n\n

Then, the function adds a batch dimension, passes it through the model, and returns:

\n\n
    \n
  • confidences: the predictions, as a dictionary whose keys are class labels and whose values are confidence probabilities
\n\n

Step 3 \u2014 Creating a Gradio Interface

\n\n

Now that we have our predictive function set up, we can create a Gradio Interface around it.

\n\n

In this case, the input component is a drag-and-drop image component. To create this input, we use the gr.Image class, which creates the component and handles the preprocessing to convert the input to a numpy array. We will instantiate the class with the shape=(224, 224) parameter, which automatically resizes the input image to 224 by 224 pixels, the size that MobileNet expects.

\n\n

The output component will be a \"label\", which displays the top labels in a nice form. Since we don't want to show all 1,000 class labels, we will customize it to show only the top 3 images.

\n\n

Finally, we'll add one more parameter, the examples, which allows us to prepopulate our interfaces with a few predefined examples. The code for Gradio looks like this:

\n\n
import gradio as gr\n\ngr.Interface(fn=classify_image, \n             inputs=gr.Image(shape=(224, 224)),\n             outputs=gr.Label(num_top_classes=3),\n             examples=[\"banana.jpg\", \"car.jpg\"]).launch()\n
\n\n

This produces the following interface, which you can try right here in your browser (try uploading your own examples!):

\n\n\n\n
\n\n

And you're done! That's all the code you need to build a web demo for an image classifier. If you'd like to share with others, try setting share=True when you launch() the Interface!

\n", - "tags": ["VISION", "MOBILENET", "TENSORFLOW"], - "spaces": [ - "https://huggingface.co/spaces/abidlabs/keras-image-classifier" - ], - "url": "/guides/image-classification-in-tensorflow/", - "contributor": null - }, - { - "name": "image-classification-with-vision-transformers", - "category": "integrating-other-frameworks", - "pretty_category": "Integrating Other Frameworks", - "guide_index": null, - "absolute_index": 22, - "pretty_name": "Image Classification With Vision Transformers", - "content": "# Image Classification with Vision Transformers\n\n\n\n\n## Introduction\n\nImage classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from facial recognition to manufacturing quality control. \n\nState-of-the-art image classifiers are based on the *transformers* architectures, originally popularized for NLP tasks. Such architectures are typically called vision transformers (ViT). Such models are perfect to use with Gradio's *image* input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in a **single line of Python**, and it will look like this (try one of the examples!):\n\n\n\n\nLet's get started!\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started).\n\n## Step 1 \u2014 Choosing a Vision Image Classification Model\n\nFirst, we will need an image classification model. For this tutorial, we will use a model from the [Hugging Face Model Hub](https://huggingface.co/models?pipeline_tag=image-classification). The Hub contains thousands of models covering dozens of different machine learning tasks. \n\nExpand the Tasks category on the left sidebar and select \"Image Classification\" as our task of interest. You will then see all of the models on the Hub that are designed to classify images.\n\nAt the time of writing, the most popular one is `google/vit-base-patch16-224`, which has been trained on ImageNet images at a resolution of 224x224 pixels. We will use this model for our demo. \n\n## Step 2 \u2014 Loading the Vision Transformer Model with Gradio\n\nWhen using a model from the Hugging Face Hub, we do not need to define the input or output components for the demo. Similarly, we do not need to be concerned with the details of preprocessing or postprocessing. \nAll of these are automatically inferred from the model tags.\n\nBesides the import statement, it only takes a single line of Python to load and launch the demo. \n\nWe use the `gr.Interface.load()` method and pass in the path to the model including the `huggingface/` to designate that it is from the Hugging Face Hub.\n\n```python\nimport gradio as gr\n\ngr.Interface.load(\n \"huggingface/google/vit-base-patch16-224\",\n examples=[\"alligator.jpg\", \"laptop.jpg\"]).launch()\n```\n\nNotice that we have added one more parameter, the `examples`, which allows us to prepopulate our interfaces with a few predefined examples. \n\nThis produces the following interface, which you can try right here in your browser. When you input an image, it is automatically preprocessed and sent to the Hugging Face Hub API, where it is passed through the model and returned as a human-interpretable prediction. Try uploading your own image!\n\n\n\n----------\n\nAnd you're done! In one line of code, you have built a web demo for an image classifier. 
If you'd like to share with others, try setting `share=True` when you `launch()` the Interface!\n\n", - "html": "

Image Classification with Vision Transformers

\n\n

Introduction

\n\n

Image classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from facial recognition to manufacturing quality control.

\n\n

State-of-the-art image classifiers are based on the transformers architectures, originally popularized for NLP tasks. Such architectures are typically called vision transformers (ViT). Such models are perfect to use with Gradio's image input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in a single line of Python, and it will look like this (try one of the examples!):

\n\n\n\n

Let's get started!

\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed.

\n\n

Step 1 \u2014 Choosing a Vision Image Classification Model

\n\n

First, we will need an image classification model. For this tutorial, we will use a model from the Hugging Face Model Hub. The Hub contains thousands of models covering dozens of different machine learning tasks.

\n\n

Expand the Tasks category on the left sidebar and select \"Image Classification\" as our task of interest. You will then see all of the models on the Hub that are designed to classify images.

\n\n

At the time of writing, the most popular one is google/vit-base-patch16-224, which has been trained on ImageNet images at a resolution of 224x224 pixels. We will use this model for our demo.

\n\n

Step 2 \u2014 Loading the Vision Transformer Model with Gradio

\n\n

When using a model from the Hugging Face Hub, we do not need to define the input or output components for the demo. Similarly, we do not need to be concerned with the details of preprocessing or postprocessing. \nAll of these are automatically inferred from the model tags.

\n\n

Besides the import statement, it only takes a single line of Python to load and launch the demo.

\n\n

We use the gr.Interface.load() method and pass in the path to the model, including the huggingface/ prefix, to designate that it is from the Hugging Face Hub.

\n\n
import gradio as gr\n\ngr.Interface.load(\n             \"huggingface/google/vit-base-patch16-224\",\n             examples=[\"alligator.jpg\", \"laptop.jpg\"]).launch()\n
\n\n

Notice that we have added one more parameter, the examples, which allows us to prepopulate our interfaces with a few predefined examples.

\n\n

This produces the following interface, which you can try right here in your browser. When you input an image, it is automatically preprocessed and sent to the Hugging Face Hub API, where it is passed through the model and returned as a human-interpretable prediction. Try uploading your own image!

\n\n\n\n
\n\n

And you're done! In one line of code, you have built a web demo for an image classifier. If you'd like to share with others, try setting share=True when you launch() the Interface!

\n", - "tags": ["VISION", "TRANSFORMERS", "HUB"], - "spaces": [ - "https://huggingface.co/spaces/abidlabs/vision-transformer" - ], - "url": "/guides/image-classification-with-vision-transformers/", - "contributor": null - }, - { - "name": "create-your-own-friends-with-a-gan", - "category": "other-tutorials", - "pretty_category": "Other Tutorials", - "guide_index": null, - "absolute_index": 34, - "pretty_name": "Create Your Own Friends With A Gan", - "content": "# Create Your Own Friends with a GAN\n\n\n\n\n\n\n\n## Introduction\n\nIt seems that cryptocurrencies, [NFTs](https://www.nytimes.com/interactive/2022/03/18/technology/nft-guide.html), and the web3 movement are all the rage these days! Digital assets are being listed on marketplaces for astounding amounts of money, and just about every celebrity is debuting their own NFT collection. While your crypto assets [may be taxable, such as in Canada](https://www.canada.ca/en/revenue-agency/programs/about-canada-revenue-agency-cra/compliance/digital-currency/cryptocurrency-guide.html), today we'll explore some fun and tax-free ways to generate your own assortment of procedurally generated [CryptoPunks](https://www.larvalabs.com/cryptopunks).\n\nGenerative Adversarial Networks, often known just as *GANs*, are a specific class of deep-learning models that are designed to learn from an input dataset to create (*generate!*) new material that is convincingly similar to elements of the original training set. Famously, the website [thispersondoesnotexist.com](https://thispersondoesnotexist.com/) went viral with lifelike, yet synthetic, images of people generated with a model called StyleGAN2. GANs have gained traction in the machine learning world, and are now being used to generate all sorts of images, text, and even [music](https://salu133445.github.io/musegan/)!\n\nToday we'll briefly look at the high-level intuition behind GANs, and then we'll build a small demo around a pre-trained GAN to see what all the fuss is about. Here's a peek at what we're going to be putting together:\n\n\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). To use the pretrained model, also install `torch` and `torchvision`.\n\n## GANs: a very brief introduction\n\nOriginally proposed in [Goodfellow et al. 2014](https://arxiv.org/abs/1406.2661), GANs are made up of neural networks which compete with the intention of outsmarting each other. One network, known as the *generator*, is responsible for generating images. The other network, the *discriminator*, receives an image at a time from the generator along with a **real** image from the training data set. The discriminator then has to guess: which image is the fake?\n\nThe generator is constantly training to create images which are trickier for the discriminator to identify, while the discriminator raises the bar for the generator every time it correctly detects a fake. As the networks engage in this competitive (*adversarial!*) relationship, the images that get generated improve to the point where they become indistinguishable to human eyes!\n\nFor a more in-depth look at GANs, you can take a look at [this excellent post on Analytics Vidhya](https://www.analyticsvidhya.com/blog/2021/06/a-detailed-explanation-of-gan-with-implementation-using-tensorflow-and-keras/) or this [PyTorch tutorial](https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html). 
For now, though, we'll dive into a demo!\n\n## Step 1 \u2014 Create the Generator model\n\nTo generate new images with a GAN, you only need the generator model. There are many different architectures that the generator could use, but for this demo we'll use a pretrained GAN generator model with the following architecture:\n\n```python\nfrom torch import nn\n\nclass Generator(nn.Module):\n # Refer to the link below for explanations about nc, nz, and ngf\n # https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html#inputs\n def __init__(self, nc=4, nz=100, ngf=64):\n super(Generator, self).__init__()\n self.network = nn.Sequential(\n nn.ConvTranspose2d(nz, ngf * 4, 3, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 0, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n nn.Tanh(),\n )\n\n def forward(self, input):\n output = self.network(input)\n return output\n```\n\nWe're taking the generator from [this repo by @teddykoker](https://github.com/teddykoker/cryptopunks-gan/blob/main/train.py#L90), where you can also see the original discriminator model structure.\n\nAfter instantiating the model, we'll load in the weights from the Hugging Face Hub, stored at [nateraw/cryptopunks-gan](https://huggingface.co/nateraw/cryptopunks-gan):\n\n```python\nfrom huggingface_hub import hf_hub_download\nimport torch\n\nmodel = Generator()\nweights_path = hf_hub_download('nateraw/cryptopunks-gan', 'generator.pth')\nmodel.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))) # Use 'cuda' if you have a GPU available\n```\n\n## Step 2 \u2014 Defining a `predict` function\n\nThe `predict` function is the key to making Gradio work! Whatever inputs we choose through the Gradio interface will get passed through our `predict` function, which should operate on the inputs and generate outputs that we can display with Gradio output components. For GANs it's common to pass random noise into our model as the input, so we'll generate a tensor of random numbers and pass that through the model. We can then use `torchvision`'s `save_image` function to save the output of the model as a `png` file, and return the file name:\n\n```python\nfrom torchvision.utils import save_image\n\ndef predict(seed):\n num_punks = 4\n torch.manual_seed(seed)\n z = torch.randn(num_punks, 100, 1, 1)\n punks = model(z)\n save_image(punks, \"punks.png\", normalize=True)\n return 'punks.png'\n```\n\nWe're giving our `predict` function a `seed` parameter, so that we can fix the random tensor generation with a seed. We'll then be able to reproduce punks if we want to see them again by passing in the same seed.\n\n*Note!* Our model needs an input tensor of dimensions 100x1x1 to do a single inference, or (BatchSize)x100x1x1 for generating a batch of images. In this demo we'll start by generating 4 punks at a time.\n\n## Step 3 \u2014 Creating a Gradio interface\n\nAt this point you can even run the code you have with `predict()`, and you'll find your freshly generated punks in your file system at `./punks.png`. To make a truly interactive demo, though, we'll build out a simple interface with Gradio. 
Our goals here are to:\n\n* Set a slider input so users can choose the \"seed\" value\n* Use an image component for our output to showcase the generated punks\n* Use our `predict()` to take the seed and generate the images\n\nWith `gr.Interface()`, we can define all of that with a single function call:\n\n```python\nimport gradio as gr\n\ngr.Interface(\n predict,\n inputs=[\n gr.Slider(0, 1000, label='Seed', default=42),\n ],\n outputs=\"image\",\n).launch()\n```\n\nLaunching the interface should present you with something like this:\n\n\n\n## Step 4 \u2014 Even more punks!\n\nGenerating 4 punks at a time is a good start, but maybe we'd like to control how many we want to make each time. Adding more inputs to our Gradio interface is as simple as adding another item to the `inputs` list that we pass to `gr.Interface`:\n\n```python\ngr.Interface(\n predict,\n inputs=[\n gr.Slider(0, 1000, label='Seed', default=42),\n gr.Slider(4, 64, label='Number of Punks', step=1, default=10), # Adding another slider!\n ],\n outputs=\"image\",\n).launch()\n```\n\nThe new input will be passed to our `predict()` function, so we have to make some changes to that function to accept a new parameter:\n\n```python\ndef predict(seed, num_punks):\n torch.manual_seed(seed)\n z = torch.randn(num_punks, 100, 1, 1)\n punks = model(z)\n save_image(punks, \"punks.png\", normalize=True)\n return 'punks.png'\n```\n\nWhen you relaunch your interface, you should see a second slider that'll let you control the number of punks!\n\n## Step 5 - Polishing it up\n\nYour Gradio app is pretty much good to go, but you can add a few extra things to really make it ready for the spotlight \u2728\n\nWe can add some examples that users can easily try out by adding this to the `gr.Interface`:\n\n```python\ngr.Interface(\n # ...\n # keep everything as it is, and then add\n examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],\n).launch(cache_examples=True) # cache_examples is optional\n```\n\nThe `examples` parameter takes a list of lists, where each item in the sublists is ordered in the same order that we've listed the `inputs`. So in our case, `[seed, num_punks]`. Give it a try!\n\nYou can also try adding a `title`, `description`, and `article` to the `gr.Interface`. 
Each of those parameters accepts a string, so try it out and see what happens \ud83d\udc40 `article` will also accept HTML, as [explored in a previous guide](/guides/key-features/#descriptive-content)!\n\nWhen you're all done, you may end up with something like this:\n\n\n\nFor reference, here is our full code:\n\n```python\nimport torch\nfrom torch import nn\nfrom huggingface_hub import hf_hub_download\nfrom torchvision.utils import save_image\nimport gradio as gr\n\nclass Generator(nn.Module):\n # Refer to the link below for explanations about nc, nz, and ngf\n # https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html#inputs\n def __init__(self, nc=4, nz=100, ngf=64):\n super(Generator, self).__init__()\n self.network = nn.Sequential(\n nn.ConvTranspose2d(nz, ngf * 4, 3, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 0, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n nn.Tanh(),\n )\n\n def forward(self, input):\n output = self.network(input)\n return output\n\nmodel = Generator()\nweights_path = hf_hub_download('nateraw/cryptopunks-gan', 'generator.pth')\nmodel.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))) # Use 'cuda' if you have a GPU available\n\ndef predict(seed, num_punks):\n torch.manual_seed(seed)\n z = torch.randn(num_punks, 100, 1, 1)\n punks = model(z)\n save_image(punks, \"punks.png\", normalize=True)\n return 'punks.png'\n\ngr.Interface(\n predict,\n inputs=[\n gr.Slider(0, 1000, label='Seed', default=42),\n gr.Slider(4, 64, label='Number of Punks', step=1, default=10),\n ],\n outputs=\"image\",\n examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],\n).launch(cache_examples=True)\n```\n----------\n\nCongratulations! You've built out your very own GAN-powered CryptoPunks generator, with a fancy Gradio interface that makes it easy for anyone to use. Now you can [scour the Hub for more GANs](https://huggingface.co/models?other=gan) (or train your own) and continue making even more awesome demos \ud83e\udd17", - "html": "

Create Your Own Friends with a GAN

\n\n

Introduction

\n\n

It seems that cryptocurrencies, NFTs, and the web3 movement are all the rage these days! Digital assets are being listed on marketplaces for astounding amounts of money, and just about every celebrity is debuting their own NFT collection. While your crypto assets may be taxable, such as in Canada, today we'll explore some fun and tax-free ways to generate your own assortment of procedurally generated CryptoPunks.

\n\n

Generative Adversarial Networks, often known just as GANs, are a specific class of deep-learning models that are designed to learn from an input dataset to create (generate!) new material that is convincingly similar to elements of the original training set. Famously, the website thispersondoesnotexist.com went viral with lifelike, yet synthetic, images of people generated with a model called StyleGAN2. GANs have gained traction in the machine learning world, and are now being used to generate all sorts of images, text, and even music!

\n\n

Today we'll briefly look at the high-level intuition behind GANs, and then we'll build a small demo around a pre-trained GAN to see what all the fuss is about. Here's a peek at what we're going to be putting together:

\n\n\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed. To use the pretrained model, also install torch and torchvision.

\n\n

GANs: a very brief introduction

\n\n

Originally proposed in Goodfellow et al. 2014, GANs are made up of neural networks which compete with the intention of outsmarting each other. One network, known as the generator, is responsible for generating images. The other network, the discriminator, receives an image at a time from the generator along with a real image from the training data set. The discriminator then has to guess: which image is the fake?

\n\n

The generator is constantly training to create images which are trickier for the discriminator to identify, while the discriminator raises the bar for the generator every time it correctly detects a fake. As the networks engage in this competitive (adversarial!) relationship, the images that get generated improve to the point where they become indistinguishable to human eyes!

\n\n
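
To make that adversarial dynamic concrete, here is a rough, illustrative sketch of a single training step in PyTorch. It is not part of this demo (we only need the pretrained generator below), and it assumes a discriminator that outputs one probability per image:

\n\n
import torch\nfrom torch import nn\n\ndef train_step(generator, discriminator, real_images, opt_g, opt_d, nz=100):\n    criterion = nn.BCELoss()\n    batch_size = real_images.size(0)\n    real_labels = torch.ones(batch_size, 1)\n    fake_labels = torch.zeros(batch_size, 1)\n\n    # 1) Train the discriminator: real images should score 1, generated fakes should score 0.\n    opt_d.zero_grad()\n    fakes = generator(torch.randn(batch_size, nz, 1, 1)).detach()\n    loss_d = criterion(discriminator(real_images), real_labels) + criterion(discriminator(fakes), fake_labels)\n    loss_d.backward()\n    opt_d.step()\n\n    # 2) Train the generator: it is rewarded when the discriminator scores its fakes as real.\n    opt_g.zero_grad()\n    loss_g = criterion(discriminator(generator(torch.randn(batch_size, nz, 1, 1))), real_labels)\n    loss_g.backward()\n    opt_g.step()\n    return loss_d.item(), loss_g.item()\n
\n\n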

For a more in-depth look at GANs, you can take a look at this excellent post on Analytics Vidhya or this PyTorch tutorial. For now, though, we'll dive into a demo!

\n\n

Step 1 \u2014 Create the Generator model

\n\n

To generate new images with a GAN, you only need the generator model. There are many different architectures that the generator could use, but for this demo we'll use a pretrained GAN generator model with the following architecture:

\n\n
from torch import nn\n\nclass Generator(nn.Module):\n    # Refer to the link below for explanations about nc, nz, and ngf\n    # https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html#inputs\n    def __init__(self, nc=4, nz=100, ngf=64):\n        super(Generator, self).__init__()\n        self.network = nn.Sequential(\n            nn.ConvTranspose2d(nz, ngf * 4, 3, 1, 0, bias=False),\n            nn.BatchNorm2d(ngf * 4),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False),\n            nn.BatchNorm2d(ngf * 2),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 0, bias=False),\n            nn.BatchNorm2d(ngf),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n            nn.Tanh(),\n        )\n\n    def forward(self, input):\n        output = self.network(input)\n        return output\n
\n\n

We're taking the generator from this repo by @teddykoker, where you can also see the original discriminator model structure.

\n\n

After instantiating the model, we'll load in the weights from the Hugging Face Hub, stored at nateraw/cryptopunks-gan:

\n\n
from huggingface_hub import hf_hub_download\nimport torch\n\nmodel = Generator()\nweights_path = hf_hub_download('nateraw/cryptopunks-gan', 'generator.pth')\nmodel.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))) # Use 'cuda' if you have a GPU available\n
\n\n

Step 2 \u2014 Defining a predict function

\n\n

The predict function is the key to making Gradio work! Whatever inputs we choose through the Gradio interface will get passed through our predict function, which should operate on the inputs and generate outputs that we can display with Gradio output components. For GANs it's common to pass random noise into our model as the input, so we'll generate a tensor of random numbers and pass that through the model. We can then use torchvision's save_image function to save the output of the model as a png file, and return the file name:

\n\n
from torchvision.utils import save_image\n\ndef predict(seed):\n    num_punks = 4\n    torch.manual_seed(seed)\n    z = torch.randn(num_punks, 100, 1, 1)\n    punks = model(z)\n    save_image(punks, \"punks.png\", normalize=True)\n    return 'punks.png'\n
\n\n

We're giving our predict function a seed parameter, so that we can fix the random tensor generation with a seed. We'll then be able to reproduce punks if we want to see them again by passing in the same seed.

\n\n

Note! Our model needs an input tensor of dimensions 100x1x1 to do a single inference, or (BatchSize)x100x1x1 for generating a batch of images. In this demo we'll start by generating 4 punks at a time.

\n\n

Step 3 \u2014 Creating a Gradio interface

\n\n

At this point you can even run the code you have with predict(<SOME_NUMBER>), and you'll find your freshly generated punks in your file system at ./punks.png. To make a truly interactive demo, though, we'll build out a simple interface with Gradio. Our goals here are to:

\n\n
    \n
  • Set a slider input so users can choose the \"seed\" value
  • Use an image component for our output to showcase the generated punks
  • Use our predict() to take the seed and generate the images
\n\n

With gr.Interface(), we can define all of that with a single function call:

\n\n
import gradio as gr\n\ngr.Interface(\n    predict,\n    inputs=[\n        gr.Slider(0, 1000, label='Seed', default=42),\n    ],\n    outputs=\"image\",\n).launch()\n
\n\n

Launching the interface should present you with something like this:

\n\n\n\n

Step 4 \u2014 Even more punks!

\n\n

Generating 4 punks at a time is a good start, but maybe we'd like to control how many we want to make each time. Adding more inputs to our Gradio interface is as simple as adding another item to the inputs list that we pass to gr.Interface:

\n\n
gr.Interface(\n    predict,\n    inputs=[\n        gr.Slider(0, 1000, label='Seed', default=42),\n        gr.Slider(4, 64, label='Number of Punks', step=1, default=10), # Adding another slider!\n    ],\n    outputs=\"image\",\n).launch()\n
\n\n

The new input will be passed to our predict() function, so we have to make some changes to that function to accept a new parameter:

\n\n
def predict(seed, num_punks):\n    torch.manual_seed(seed)\n    z = torch.randn(num_punks, 100, 1, 1)\n    punks = model(z)\n    save_image(punks, \"punks.png\", normalize=True)\n    return 'punks.png'\n
\n\n

When you relaunch your interface, you should see a second slider that'll let you control the number of punks!

\n\n

Step 5 - Polishing it up

\n\n

Your Gradio app is pretty much good to go, but you can add a few extra things to really make it ready for the spotlight \u2728

\n\n

We can add some examples that users can easily try out by adding this to the gr.Interface:

\n\n
gr.Interface(\n    # ...\n    # keep everything as it is, and then add\n    examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],\n).launch(cache_examples=True) # cache_examples is optional\n
\n\n

The examples parameter takes a list of lists, where each item in the sublists is ordered in the same order that we've listed the inputs. So in our case, [seed, num_punks]. Give it a try!

\n\n

You can also try adding a title, description, and article to the gr.Interface. Each of those parameters accepts a string, so try it out and see what happens \ud83d\udc40 article will also accept HTML, as explored in a previous guide!

\n\n
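
For example, here is a quick sketch of what that could look like (the title, description, and article strings are placeholders to adapt to your own demo):

\n\n
gr.Interface(\n    predict,\n    inputs=[\n        gr.Slider(0, 1000, label='Seed', default=42),\n        gr.Slider(4, 64, label='Number of Punks', step=1, default=10),\n    ],\n    outputs=\"image\",\n    title=\"CryptoPunks GAN\",\n    description=\"Generate your own CryptoPunk-style characters with a pretrained GAN.\",\n    article=\"<p>Built with a pretrained CryptoPunks GAN and Gradio.</p>\",\n    examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],\n).launch(cache_examples=True)\n
\n\n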

When you're all done, you may end up with something like this:

\n\n\n\n

For reference, here is our full code:

\n\n
import torch\nfrom torch import nn\nfrom huggingface_hub import hf_hub_download\nfrom torchvision.utils import save_image\nimport gradio as gr\n\nclass Generator(nn.Module):\n    # Refer to the link below for explanations about nc, nz, and ngf\n    # https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html#inputs\n    def __init__(self, nc=4, nz=100, ngf=64):\n        super(Generator, self).__init__()\n        self.network = nn.Sequential(\n            nn.ConvTranspose2d(nz, ngf * 4, 3, 1, 0, bias=False),\n            nn.BatchNorm2d(ngf * 4),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False),\n            nn.BatchNorm2d(ngf * 2),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 0, bias=False),\n            nn.BatchNorm2d(ngf),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n            nn.Tanh(),\n        )\n\n    def forward(self, input):\n        output = self.network(input)\n        return output\n\nmodel = Generator()\nweights_path = hf_hub_download('nateraw/cryptopunks-gan', 'generator.pth')\nmodel.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))) # Use 'cuda' if you have a GPU available\n\ndef predict(seed, num_punks):\n    torch.manual_seed(seed)\n    z = torch.randn(num_punks, 100, 1, 1)\n    punks = model(z)\n    save_image(punks, \"punks.png\", normalize=True)\n    return 'punks.png'\n\ngr.Interface(\n    predict,\n    inputs=[\n        gr.Slider(0, 1000, label='Seed', default=42),\n        gr.Slider(4, 64, label='Number of Punks', step=1, default=10),\n    ],\n    outputs=\"image\",\n    examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],\n).launch(cache_examples=True)\n
\n\n
\n\n

Congratulations! You've built out your very own GAN-powered CryptoPunks generator, with a fancy Gradio interface that makes it easy for anyone to use. Now you can scour the Hub for more GANs (or train your own) and continue making even more awesome demos \ud83e\udd17

\n", - "tags": ["GAN", "IMAGE", "HUB"], - "spaces": [ - "https://huggingface.co/spaces/NimaBoscarino/cryptopunks", - "https://huggingface.co/spaces/nateraw/cryptopunks-generator" - ], - "url": "/guides/create-your-own-friends-with-a-gan/", - "contributor": "Nima Boscarino and Nate Raw" - } - ], - "parent": "gradio", - "prev_obj": "load", - "next_obj": "Progress" - }, - "progress": { - "class": null, - "name": "Progress", - "description": "The Progress class provides a custom progress tracker that is used in a function signature. To attach a Progress tracker to a function, simply add a parameter right after the input parameters that has a default value set to a `gradio.Progress()` instance. The Progress tracker can then be updated in the function by calling the Progress object or using the `tqdm` method on an Iterable. The Progress tracker is currently only available with `queue()`.", - "tags": { "demos": "progress" }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "track_tqdm", - "annotation": "bool", - "doc": "If True, the Progress object will track any tqdm.tqdm iterations with the tqdm library in the function.", - "default": "False" - } - ], - "returns": { "annotation": null }, - "example": "import gradio as gr\nimport time\ndef my_function(x, progress=gr.Progress()):\n progress(0, desc=\"Starting...\")\n time.sleep(1)\n for i in progress.tqdm(range(100)):\n time.sleep(0.1)\n return x\ngr.Interface(my_function, gr.Textbox(), gr.Textbox()).queue().launch()", - "fns": [ - { - "fn": null, - "name": "__call__", - "description": "Updates progress tracker with progress and message text.", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "progress", - "annotation": "float | tuple[int, int | None] | None", - "doc": "If float, should be between 0 and 1 representing completion. If Tuple, first number represents steps completed, and second value represents total steps or None if unknown. If None, hides progress bar." - }, - { - "name": "desc", - "annotation": "str | None", - "doc": "description to display.", - "default": "None" - }, - { - "name": "total", - "annotation": "int | None", - "doc": "estimated total number of steps.", - "default": "None" - }, - { - "name": "unit", - "annotation": "str", - "doc": "unit of iterations.", - "default": "\"steps\"" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Progress" - }, - { - "fn": null, - "name": "tqdm", - "description": "Attaches progress tracker to iterable, like tqdm.", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "iterable", - "annotation": "Iterable | None", - "doc": "iterable to attach progress tracker to." 
- }, - { - "name": "desc", - "annotation": "str | None", - "doc": "description to display.", - "default": "None" - }, - { - "name": "total", - "annotation": "int | None", - "doc": "estimated total number of steps.", - "default": "None" - }, - { - "name": "unit", - "annotation": "str", - "doc": "unit of iterations.", - "default": "\"steps\"" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Progress" - } - ], - "demos": [ - [ - "progress", - "import gradio as gr\nimport random\nimport time\nimport tqdm\nfrom datasets import load_dataset\nimport shutil\nfrom uuid import uuid4\n\nwith gr.Blocks() as demo:\n with gr.Row():\n text = gr.Textbox()\n textb = gr.Textbox()\n with gr.Row():\n load_set_btn = gr.Button(\"Load Set\")\n load_nested_set_btn = gr.Button(\"Load Nested Set\")\n load_random_btn = gr.Button(\"Load Random\")\n clean_imgs_btn = gr.Button(\"Clean Images\")\n wait_btn = gr.Button(\"Wait\")\n do_all_btn = gr.Button(\"Do All\")\n track_tqdm_btn = gr.Button(\"Bind TQDM\")\n bind_internal_tqdm_btn = gr.Button(\"Bind Internal TQDM\")\n\n text2 = gr.Textbox()\n\n # track list\n def load_set(text, text2, progress=gr.Progress()):\n imgs = [None] * 24\n for img in progress.tqdm(imgs, desc=\"Loading from list\"):\n time.sleep(0.1)\n return \"done\"\n load_set_btn.click(load_set, [text, textb], text2)\n\n # track nested list\n def load_nested_set(text, text2, progress=gr.Progress()):\n imgs = [[None] * 8] * 3\n for img_set in progress.tqdm(imgs, desc=\"Nested list\"):\n time.sleep(2)\n for img in progress.tqdm(img_set, desc=\"inner list\"):\n time.sleep(0.1)\n return \"done\"\n load_nested_set_btn.click(load_nested_set, [text, textb], text2)\n\n # track iterable of unknown length\n def load_random(data, progress=gr.Progress()):\n def yielder():\n for i in range(0, random.randint(15, 20)):\n time.sleep(0.1)\n yield None\n for img in progress.tqdm(yielder()):\n pass\n return \"done\"\n load_random_btn.click(load_random, {text, textb}, text2)\n \n # manual progress\n def clean_imgs(text, progress=gr.Progress()):\n progress(0.2, desc=\"Collecting Images\")\n time.sleep(1)\n progress(0.5, desc=\"Cleaning Images\")\n time.sleep(1.5)\n progress(0.8, desc=\"Sending Images\")\n time.sleep(1.5)\n return \"done\"\n clean_imgs_btn.click(clean_imgs, text, text2)\n\n # no progress\n def wait(text):\n time.sleep(4)\n return \"done\"\n wait_btn.click(wait, text, text2)\n\n # multiple progressions\n def do_all(data, progress=gr.Progress()):\n load_set(data[text], data[textb], progress)\n load_random(data, progress)\n clean_imgs(data[text], progress)\n progress(None)\n wait(text)\n return \"done\"\n do_all_btn.click(do_all, {text, textb}, text2)\n\n def track_tqdm(data, progress=gr.Progress(track_tqdm=True)):\n for i in tqdm.tqdm(range(5), desc=\"outer\"):\n for j in tqdm.tqdm(range(4), desc=\"inner\"):\n time.sleep(1)\n return \"done\"\n track_tqdm_btn.click(track_tqdm, {text, textb}, text2)\n\n def bind_internal_tqdm(data, progress=gr.Progress(track_tqdm=True)):\n outdir = \"__tmp/\" + str(uuid4())\n load_dataset(\"beans\", split=\"train\", cache_dir=outdir)\n shutil.rmtree(outdir)\n return \"done\"\n bind_internal_tqdm_btn.click(bind_internal_tqdm, {text, textb}, text2)\n\n\nif __name__ == \"__main__\":\n demo.queue(concurrency_count=20).launch()\n" - ] - ], - "parent": "gradio", - "prev_obj": "Examples", - "next_obj": "update" - }, - "update": { - "class": null, - "name": "update", - "description": "Updates component properties. 
When a function passed into a Gradio Interface or a Blocks event returns a typical value, it updates the value of the output component. But it is also possible to update the properties of an output component (such as the number of lines of a `Textbox` or the visibility of an `Image`) by returning the component's `update()` function, which takes as parameters any of the constructor parameters for that component. This is a shorthand for using the update method on a component. For example, rather than using gr.Number.update(...) you can just use gr.update(...). Note that your editor's autocompletion will suggest proper parameters if you use the update method on the component.
", - "tags": { - "demos": "blocks_essay, blocks_update, blocks_essay_update", - "parameters": "kwargs: Key-word arguments used to update the component's properties." - }, - "parameters": [ - { - "name": "kwargs", - "annotation": "", - "doc": "Key-word arguments used to update the component's properties." - } - ], - "returns": { "annotation": null }, - "example": "# Blocks Example\nimport gradio as gr\nwith gr.Blocks() as demo:\n radio = gr.Radio([1, 2, 4], label=\"Set the value of the number\")\n number = gr.Number(value=2, interactive=True)\n radio.change(fn=lambda value: gr.update(value=value), inputs=radio, outputs=number)\ndemo.launch()\n\n# Interface example\nimport gradio as gr\ndef change_textbox(choice):\n if choice == \"short\":\n return gr.Textbox.update(lines=2, visible=True)\n elif choice == \"long\":\n return gr.Textbox.update(lines=8, visible=True)\n else:\n return gr.Textbox.update(visible=False)\ngr.Interface(\n change_textbox,\n gr.Radio(\n [\"short\", \"long\", \"none\"], label=\"What kind of essay would you like to write?\"\n ),\n gr.Textbox(lines=2),\n live=True,\n).launch()", - "fns": [], - "demos": [ - [ - "blocks_essay", - "import gradio as gr\n\n\ndef change_textbox(choice):\n if choice == \"short\":\n return gr.Textbox.update(lines=2, visible=True)\n elif choice == \"long\":\n return gr.Textbox.update(lines=8, visible=True)\n else:\n return gr.Textbox.update(visible=False)\n\n\nwith gr.Blocks() as demo:\n radio = gr.Radio(\n [\"short\", \"long\", \"none\"], label=\"What kind of essay would you like to write?\"\n )\n text = gr.Textbox(lines=2, interactive=True).style(show_copy_button=True)\n\n radio.change(fn=change_textbox, inputs=radio, outputs=text)\n\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ], - [ - "blocks_update", - "import gradio as gr\n\nwith gr.Blocks() as demo:\n gr.Markdown(\n \"\"\"\n # Animal Generator\n Once you select a species, the detail panel should be visible.\n \"\"\"\n )\n\n species = gr.Radio(label=\"Animal Class\", choices=[\"Mammal\", \"Fish\", \"Bird\"])\n animal = gr.Dropdown(label=\"Animal\", choices=[])\n\n with gr.Column(visible=False) as details_col:\n weight = gr.Slider(0, 20)\n details = gr.Textbox(label=\"Extra Details\")\n generate_btn = gr.Button(\"Generate\")\n output = gr.Textbox(label=\"Output\")\n\n species_map = {\n \"Mammal\": [\"Elephant\", \"Giraffe\", \"Hamster\"],\n \"Fish\": [\"Shark\", \"Salmon\", \"Tuna\"],\n \"Bird\": [\"Chicken\", \"Eagle\", \"Hawk\"],\n }\n\n def filter_species(species):\n return gr.Dropdown.update(\n choices=species_map[species], value=species_map[species][1]\n ), gr.update(visible=True)\n\n species.change(filter_species, species, [animal, details_col])\n\n def filter_weight(animal):\n if animal in (\"Elephant\", \"Shark\", \"Giraffe\"):\n return gr.update(maximum=100)\n else:\n return gr.update(maximum=20)\n\n animal.change(filter_weight, animal, weight)\n weight.change(lambda w: gr.update(lines=int(w / 10) + 1), weight, details)\n\n generate_btn.click(lambda x: x, details, output)\n\n\nif __name__ == \"__main__\":\n demo.launch()" - ], - [ - "blocks_essay_update", - "import gradio as gr\n\ndef change_textbox(choice):\n if choice == \"short\":\n return gr.update(lines=2, visible=True, value=\"Short story: \")\n elif choice == \"long\":\n return gr.update(lines=8, visible=True, value=\"Long story...\")\n else:\n return gr.update(visible=False)\n\nwith gr.Blocks() as demo:\n radio = gr.Radio(\n [\"short\", \"long\", \"none\"], label=\"Essay Length to Write?\"\n )\n text = 
gr.Textbox(lines=2, interactive=True)\n radio.change(fn=change_textbox, inputs=radio, outputs=text)\n\nif __name__ == \"__main__\":\n demo.launch()" - ] - ], - "parent": "gradio", - "prev_obj": "Progress", - "next_obj": "make_waveform" - }, - "make_waveform": { - "class": null, - "name": "make_waveform", - "description": "Generates a waveform video from an audio file. Useful for creating an easy to share audio visualization. The output should be passed into a `gr.Video` component.", - "tags": { - "parameters": "audio: Audio file path or tuple of (sample_rate, audio_data)
bg_color: Background color of waveform (ignored if bg_image is provided)
bg_image: Background image of waveform
fg_alpha: Opacity of foreground waveform
bars_color: Color of waveform bars. Can be a single color or a tuple of (start_color, end_color) for a gradient
bar_count: Number of bars in waveform
bar_width: Width of bars in waveform. 1 represents full width, 0.5 represents half width, etc.
animate: If true, the audio waveform overlay will be animated, if false, it will be static.", - "returns": "A filepath to the output video in mp4 format." - }, - "parameters": [ - { - "name": "audio", - "annotation": "str | tuple[int, np.ndarray]", - "doc": "Audio file path or tuple of (sample_rate, audio_data)" - }, - { - "name": "bg_color", - "annotation": "str", - "doc": "Background color of waveform (ignored if bg_image is provided)", - "default": "\"#f3f4f6\"" - }, - { - "name": "bg_image", - "annotation": "str | None", - "doc": "Background image of waveform", - "default": "None" - }, - { - "name": "fg_alpha", - "annotation": "float", - "doc": "Opacity of foreground waveform", - "default": "0.75" - }, - { - "name": "bars_color", - "annotation": "str | tuple[str, str]", - "doc": "Color of waveform bars. Can be a single color or a tuple of (start_color, end_color) of gradient", - "default": "('#fbbf24', '#ea580c')" - }, - { - "name": "bar_count", - "annotation": "int", - "doc": "Number of bars in waveform", - "default": "50" - }, - { - "name": "bar_width", - "annotation": "float", - "doc": "Width of bars in waveform. 1 represents full width, 0.5 represents half width, etc.", - "default": "0.6" - }, - { - "name": "animate", - "annotation": "bool", - "doc": "If true, the audio waveform overlay will be animated, if false, it will be static.", - "default": "False" - } - ], - "returns": { - "annotation": null, - "doc": "A filepath to the output video in mp4 format." - }, - "example": null, - "fns": [], - "parent": "gradio", - "prev_obj": "update", - "next_obj": "EventData" - }, - "eventdata": { - "class": null, - "name": "EventData", - "description": "When a subclass of EventData is added as a type hint to an argument of an event listener method, this object will be passed as that argument. It contains information about the event that triggered the listener, such the target object, and other data related to the specific event that are attributes of the subclass.
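Since the `make_waveform` entry above ships without an example, here is a minimal, hedged sketch using the parameters documented there (the input file `speech.wav` is an assumption, and `ffmpeg` must be available to render the mp4):

```python
import gradio as gr

# Assumed local audio file; make_waveform returns the filepath of the rendered mp4.
video_path = gr.make_waveform(
    "speech.wav",
    bg_color="#f3f4f6",                  # ignored if bg_image is provided
    bars_color=("#fbbf24", "#ea580c"),   # (start_color, end_color) gradient
    bar_count=60,
    bar_width=0.6,
    animate=False,
)

with gr.Blocks() as demo:
    gr.Video(video_path)  # the docs suggest passing the output into a gr.Video component

demo.launch()
```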
", - "tags": { "demos": "gallery_selections, tictactoe" }, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "target", - "annotation": "Block | None", - "doc": "The target object that triggered the event. Can be used to distinguish if multiple components are bound to the same listener." - } - ], - "returns": { "annotation": null }, - "example": "table = gr.Dataframe([[1, 2, 3], [4, 5, 6]])\ngallery = gr.Gallery([(\"cat.jpg\", \"Cat\"), (\"dog.jpg\", \"Dog\")])\ntextbox = gr.Textbox(\"Hello World!\")\n\nstatement = gr.Textbox()\n\ndef on_select(evt: gr.SelectData): # SelectData is a subclass of EventData\n return f\"You selected {evt.value} at {evt.index} from {evt.target}\"\n\ntable.select(on_select, None, statement)\ngallery.select(on_select, None, statement)\ntextbox.select(on_select, None, statement)", - "fns": [], - "demos": [ - [ - "gallery_selections", - "import gradio as gr\nimport numpy as np\n\nwith gr.Blocks() as demo:\n imgs = gr.State()\n gallery = gr.Gallery()\n\n def generate_images():\n images = []\n for _ in range(9):\n image = np.ones((100, 100, 3), dtype=np.uint8) * np.random.randint(\n 0, 255, 3\n ) # image is a solid single color\n images.append(image)\n return images, images\n\n demo.load(generate_images, None, [gallery, imgs])\n\n with gr.Row():\n selected = gr.Number(show_label=False, placeholder=\"Selected\")\n darken_btn = gr.Button(\"Darken selected\")\n\n def get_select_index(evt: gr.SelectData):\n return evt.index\n\n gallery.select(get_select_index, None, selected)\n\n def darken_img(imgs, index):\n index = int(index)\n imgs[index] = np.round(imgs[index] * 0.8).astype(np.uint8)\n return imgs, imgs\n\n darken_btn.click(darken_img, [imgs, selected], [imgs, gallery])\n\nif __name__ == \"__main__\":\n demo.launch()\n" - ], - [ - "tictactoe", - "import gradio as gr\n\nwith gr.Blocks() as demo:\n turn = gr.Textbox(\"X\", interactive=False, label=\"Turn\")\n board = gr.Dataframe(value=[[\"\", \"\", \"\"]] * 3, interactive=False, type=\"array\")\n\n def place(board, turn, evt: gr.SelectData):\n if evt.value:\n return board, turn\n board[evt.index[0]][evt.index[1]] = turn\n turn = \"O\" if turn == \"X\" else \"X\"\n return board, turn\n\n board.select(place, [board, turn], [board, turn])\n\nif __name__ == \"__main__\":\n demo.launch()" - ] - ], - "parent": "gradio", - "prev_obj": "make_waveform", - "next_obj": "Warning" - }, - "warning": { - "class": null, - "name": "Warning", - "description": "This function allows you to pass custom warning messages to the user. You can do so simply with `gr.Warning('message here')`, and when that line is executed the custom message will appear in a modal on the demo.", - "tags": { - "parameters": "message: The warning message to be displayed to the user." - }, - "parameters": [ - { - "name": "message", - "annotation": "str", - "doc": "The warning message to be displayed to the user.", - "default": "\"Warning issued.\"" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [], - "parent": "gradio", - "prev_obj": "EventData", - "next_obj": "Info" - }, - "info": { - "class": null, - "name": "Info", - "description": "", - "tags": { - "parameters": "message: The info message to be displayed to the user." 
- }, - "parameters": [ - { - "name": "message", - "annotation": "str", - "doc": "The info message to be displayed to the user.", - "default": "\"Info issued.\"" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [], - "parent": "gradio", - "prev_obj": "Warning", - "next_obj": "Request" - } - }, - "routes": { - "request": { - "class": null, - "name": "Request", - "description": "A Gradio request object that can be used to access the request headers, cookies, query parameters and other information about the request from within the prediction function. The class is a thin wrapper around the fastapi.Request class. Attributes of this class include: `headers`, `client`, `query_params`, and `path_params`. If auth is enabled, the `username` attribute can be used to get the logged in user.", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "request", - "annotation": "fastapi.Request | None", - "doc": "A fastapi.Request", - "default": "None" - }, - { - "name": "username", - "annotation": "str | None", - "doc": null, - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": "import gradio as gr\ndef echo(name, request: gr.Request):\n print(\"Request headers dictionary:\", request.headers)\n print(\"IP address:\", request.client.host)\n return name\nio = gr.Interface(echo, \"textbox\", \"textbox\").launch()", - "fns": [], - "parent": "gradio", - "prev_obj": "make_waveform", - "next_obj": "mount_gradio_app" - }, - "mount_gradio_app": { - "class": null, - "name": "mount_gradio_app", - "description": "Mount a gradio.Blocks to an existing FastAPI application.
", - "tags": { - "parameters": "app: The parent FastAPI application.
blocks: The blocks object we want to mount to the parent app.
path: The path at which the gradio application will be mounted.
gradio_api_url: The full url at which the gradio app will run. This is only needed if deploying to Huggingface spaces or if the websocket endpoints of your deployed app are on a different network location than the gradio app. If deploying to spaces, set gradio_api_url to 'http://localhost:7860/'
app_kwargs: Additional keyword arguments to pass to the underlying FastAPI app as a dictionary of parameter keys and argument values. For example, `{\"docs_url\": \"/docs\"}`" - }, - "parameters": [ - { - "name": "app", - "annotation": "fastapi.FastAPI", - "doc": "The parent FastAPI application." - }, - { - "name": "blocks", - "annotation": "gradio.Blocks", - "doc": "The blocks object we want to mount to the parent app." - }, - { - "name": "path", - "annotation": "str", - "doc": "The path at which the gradio application will be mounted." - }, - { - "name": "gradio_api_url", - "annotation": "str | None", - "doc": "The full url at which the gradio app will run. This is only needed if deploying to Huggingface spaces of if the websocket endpoints of your deployed app are on a different network location than the gradio app. If deploying to spaces, set gradio_api_url to 'http://localhost:7860/'", - "default": "None" - }, - { - "name": "app_kwargs", - "annotation": "dict[str, Any] | None", - "doc": "Additional keyword arguments to pass to the underlying FastAPI app as a dictionary of parameter keys and argument values. For example, `{\"docs_url\": \"/docs\"}`", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": "from fastapi import FastAPI\nimport gradio as gr\napp = FastAPI()\n@app.get(\"/\")\ndef read_main():\n return {\"message\": \"This is your main app\"}\nio = gr.Interface(lambda x: \"Hello, \" + x + \"!\", \"textbox\", \"textbox\")\napp = gr.mount_gradio_app(app, io, path=\"/gradio\")\n# Then run `uvicorn run:app` from the terminal and navigate to http://localhost:8000/gradio.", - "fns": [], - "parent": "gradio", - "prev_obj": "Request", - "next_obj": "Python-Client" - } - }, - "events": [ - "change", - "input", - "click", - "submit", - "edit", - "clear", - "play", - "pause", - "stop", - "end", - "stream", - "start_recording", - "stop_recording", - "focus", - "blur", - "upload", - "release", - "select" - ], - "py-client": { - "client": { - "class": null, - "name": "Client", - "description": "The main Client class for the Python client. This class is used to connect to a remote Gradio app and call its API endpoints.
", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "src", - "annotation": "str", - "doc": "Either the name of the Hugging Face Space to load, (e.g. \"abidlabs/whisper-large-v2\") or the full URL (including \"http\" or \"https\") of the hosted Gradio app to load (e.g. \"http://mydomain.com/app\" or \"https://bec81a83-5b5c-471e.gradio.live/\")." - }, - { - "name": "hf_token", - "annotation": "str | None", - "doc": "The Hugging Face token to use to access private Spaces. Automatically fetched if you are logged in via the Hugging Face Hub CLI. Obtain from: https://huggingface.co/settings/token", - "default": "None" - }, - { - "name": "max_workers", - "annotation": "int", - "doc": "The maximum number of thread workers that can be used to make requests to the remote Gradio app simultaneously.", - "default": "40" - }, - { - "name": "serialize", - "annotation": "bool", - "doc": "Whether the client should serialize the inputs and deserialize the outputs of the remote API. If set to False, the client will pass the inputs and outputs as-is, without serializing/deserializing them. E.g. you if you set this to False, you'd submit an image in base64 format instead of a filepath, and you'd get back an image in base64 format from the remote API instead of a filepath.", - "default": "True" - }, - { - "name": "output_dir", - "annotation": "str | Path | None", - "doc": "The directory to save files that are downloaded from the remote API. If None, reads from the GRADIO_TEMP_DIR environment variable. Defaults to a temporary directory on your machine.", - "default": "\"/var/folders/lt/_bbyb3m10xbb9cpp4x7qs2rc0000gn/T/gradio\"" - }, - { - "name": "verbose", - "annotation": "bool", - "doc": "Whether the client should print statements to the console.", - "default": "True" - } - ], - "returns": { "annotation": null }, - "example": "from gradio_client import Client\n\nclient = Client(\"abidlabs/whisper-large-v2\") # connecting to a Hugging Face Space\nclient.predict(\"test.mp4\", api_name=\"/predict\")\n>> What a nice recording! # returns the result of the remote API call\n\nclient = Client(\"https://bec81a83-5b5c-471e.gradio.live\") # connecting to a temporary Gradio share URL\njob = client.submit(\"hello\", api_name=\"/predict\") # runs the prediction in a background thread\njob.result()\n>> 49 # returns the result of the remote API call (blocking call)", - "fns": [ - { - "fn": null, - "name": "predict", - "description": "Calls the Gradio API and returns the result (this is a blocking call).
", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "args", - "annotation": "", - "doc": "The arguments to pass to the remote API. The order of the arguments must match the order of the inputs in the Gradio app." - }, - { - "name": "api_name", - "annotation": "str | None", - "doc": "The name of the API endpoint to call starting with a leading slash, e.g. \"/predict\". Does not need to be provided if the Gradio app has only one named API endpoint.", - "default": "None" - }, - { - "name": "fn_index", - "annotation": "int | None", - "doc": "As an alternative to api_name, this parameter takes the index of the API endpoint to call, e.g. 0. Both api_name and fn_index can be provided, but if they conflict, api_name will take precedence.", - "default": "None" - } - ], - "returns": { - "annotation": "Any", - "doc": "The result of the API call. Will be a Tuple if the API has multiple outputs." - }, - "example": "from gradio_client import Client\nclient = Client(src=\"gradio/calculator\")\nclient.predict(5, \"add\", 4, api_name=\"/predict\")\n>> 9.0", - "override_signature": null, - "parent": "gradio.Client" - }, - { - "fn": null, - "name": "submit", - "description": "Creates and returns a Job object which calls the Gradio API in a background thread. The job can be used to retrieve the status and result of the remote API call.
", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "args", - "annotation": "", - "doc": "The arguments to pass to the remote API. The order of the arguments must match the order of the inputs in the Gradio app." - }, - { - "name": "api_name", - "annotation": "str | None", - "doc": "The name of the API endpoint to call starting with a leading slash, e.g. \"/predict\". Does not need to be provided if the Gradio app has only one named API endpoint.", - "default": "None" - }, - { - "name": "fn_index", - "annotation": "int | None", - "doc": "As an alternative to api_name, this parameter takes the index of the API endpoint to call, e.g. 0. Both api_name and fn_index can be provided, but if they conflict, api_name will take precedence.", - "default": "None" - }, - { - "name": "result_callbacks", - "annotation": "Callable | list[Callable] | None", - "doc": "A callback function, or list of callback functions, to be called when the result is ready. If a list of functions is provided, they will be called in order. The return values from the remote API are provided as separate parameters into the callback. If None, no callback will be called.", - "default": "None" - } - ], - "returns": { - "annotation": "Job", - "doc": "A Job object that can be used to retrieve the status and result of the remote API call." - }, - "example": "from gradio_client import Client\nclient = Client(src=\"gradio/calculator\")\njob = client.submit(5, \"add\", 4, api_name=\"/predict\")\njob.status()\n>> \njob.result() # blocking call\n>> 9.0", - "override_signature": null, - "parent": "gradio.Client" - }, - { - "fn": null, - "name": "view_api", - "description": "Prints the usage info for the API. If the Gradio app has multiple API endpoints, the usage info for each endpoint will be printed separately. If return_format=\"dict\" the info is returned in dictionary format, as shown in the example below.
", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "all_endpoints", - "annotation": "bool | None", - "doc": "If True, prints information for both named and unnamed endpoints in the Gradio app. If False, will only print info about named endpoints. If None (default), will print info about named endpoints, unless there aren't any -- in which it will print info about unnamed endpoints.", - "default": "None" - }, - { - "name": "print_info", - "annotation": "bool", - "doc": "If True, prints the usage info to the console. If False, does not print the usage info.", - "default": "True" - }, - { - "name": "return_format", - "annotation": "Literal['dict', 'str'] | None", - "doc": "If None, nothing is returned. If \"str\", returns the same string that would be printed to the console. If \"dict\", returns the usage info as a dictionary that can be programmatically parsed, and *all endpoints are returned in the dictionary* regardless of the value of `all_endpoints`. The format of the dictionary is in the docstring of this method.", - "default": "None" - } - ], - "returns": {}, - "example": "from gradio_client import Client\nclient = Client(src=\"gradio/calculator\")\nclient.view_api(return_format=\"dict\")\n>> {\n 'named_endpoints': {\n '/predict': {\n 'parameters': [\n {\n 'label': 'num1',\n 'type_python': 'int | float',\n 'type_description': 'numeric value',\n 'component': 'Number',\n 'example_input': '5'\n },\n {\n 'label': 'operation',\n 'type_python': 'str',\n 'type_description': 'string value',\n 'component': 'Radio',\n 'example_input': 'add'\n },\n {\n 'label': 'num2',\n 'type_python': 'int | float',\n 'type_description': 'numeric value',\n 'component': 'Number',\n 'example_input': '5'\n },\n ],\n 'returns': [\n {\n 'label': 'output',\n 'type_python': 'int | float',\n 'type_description': 'numeric value',\n 'component': 'Number',\n },\n ]\n },\n '/flag': {\n 'parameters': [\n ...\n ],\n 'returns': [\n ...\n ]\n }\n }\n 'unnamed_endpoints': {\n 2: {\n 'parameters': [\n ...\n ],\n 'returns': [\n ...\n ]\n }\n }\n }\n}", - "override_signature": null, - "parent": "gradio.Client" - }, - { - "fn": null, - "name": "duplicate", - "description": "Duplicates a Hugging Face Space under your account and returns a Client object for the new Space. No duplication is created if the Space already exists in your account (to override this, provide a new name for the new Space using `to_id`). To use this method, you must provide an `hf_token` or be logged in via the Hugging Face Hub CLI.
The new Space will be private by default and use the same hardware as the original Space. This can be changed by using the `private` and `hardware` parameters. For hardware upgrades (beyond the basic CPU tier), you may be required to provide billing information on Hugging Face: https://huggingface.co/settings/billing
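The defaults discussed above (private Space, same hardware) can be overridden; a hedged sketch, where the Space names and hardware tier are illustrative:

```python
import os
from gradio_client import Client

HF_TOKEN = os.environ.get("HF_TOKEN")

# Duplicate publicly under a custom name and request a different hardware tier.
# Upgraded hardware may require billing information on Hugging Face.
client = Client.duplicate(
    "abidlabs/whisper",        # illustrative source Space
    to_id="my-whisper-copy",   # illustrative name for the new Space
    private=False,
    hardware="t4-small",
    hf_token=HF_TOKEN,
)
client.predict("audio_sample.wav")
```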
", - "tags": {}, - "parameters": [ - { - "name": "from_id", - "annotation": "str", - "doc": "The name of the Hugging Face Space to duplicate in the format \"{username}/{space_id}\", e.g. \"gradio/whisper\"." - }, - { - "name": "to_id", - "annotation": "str | None", - "doc": "The name of the new Hugging Face Space to create, e.g. \"abidlabs/whisper-duplicate\". If not provided, the new Space will be named \"{your_HF_username}/{space_id}\".", - "default": "None" - }, - { - "name": "hf_token", - "annotation": "str | None", - "doc": "The Hugging Face token to use to access private Spaces. Automatically fetched if you are logged in via the Hugging Face Hub CLI. Obtain from: https://huggingface.co/settings/token", - "default": "None" - }, - { - "name": "private", - "annotation": "bool", - "doc": "Whether the new Space should be private (True) or public (False). Defaults to True.", - "default": "True" - }, - { - "name": "hardware", - "annotation": "Literal['cpu-basic', 'cpu-upgrade', 't4-small', 't4-medium', 'a10g-small', 'a10g-large', 'a100-large'] | SpaceHardware | None", - "doc": "The hardware tier to use for the new Space. Defaults to the same hardware tier as the original Space. Options include \"cpu-basic\", \"cpu-upgrade\", \"t4-small\", \"t4-medium\", \"a10g-small\", \"a10g-large\", \"a100-large\", subject to availability.", - "default": "None" - }, - { - "name": "secrets", - "annotation": "dict[str, str] | None", - "doc": "A dictionary of (secret key, secret value) to pass to the new Space. Defaults to None. Secrets are only used when the Space is duplicated for the first time, and are not updated if the duplicated Space already exists.", - "default": "None" - }, - { - "name": "sleep_timeout", - "annotation": "int", - "doc": "The number of minutes after which the duplicate Space will be puased if no requests are made to it (to minimize billing charges). Defaults to 5 minutes.", - "default": "5" - }, - { - "name": "max_workers", - "annotation": "int", - "doc": "The maximum number of thread workers that can be used to make requests to the remote Gradio app simultaneously.", - "default": "40" - }, - { - "name": "verbose", - "annotation": "bool", - "doc": "Whether the client should print statements to the console.", - "default": "True" - } - ], - "returns": {}, - "example": "import os\nfrom gradio_client import Client\nHF_TOKEN = os.environ.get(\"HF_TOKEN\")\nclient = Client.duplicate(\"abidlabs/whisper\", hf_token=HF_TOKEN)\nclient.predict(\"audio_sample.wav\")\n>> \"This is a test of the whisper speech recognition model.\"", - "override_signature": null, - "parent": "gradio.Client" - }, - { - "fn": null, - "name": "deploy_discord", - "description": "Deploy the upstream app as a discord bot. Currently only supports gr.ChatInterface.", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "discord_bot_token", - "annotation": "str | None", - "doc": "This is the \"password\" needed to be able to launch the bot. Users can get a token by creating a bot app on the discord website. If run the method without specifying a token, the space will explain how to get one. See here: https://huggingface.co/spaces/freddyaboulton/test-discord-bot-v1.", - "default": "None" - }, - { - "name": "api_names", - "annotation": "list[str | tuple[str, str]] | None", - "doc": "The api_names of the app to turn into bot commands. 
This parameter currently has no effect as ChatInterface only has one api_name ('/chat').", - "default": "None" - }, - { - "name": "to_id", - "annotation": "str | None", - "doc": "The name of the space hosting the discord bot. If None, the name will be gradio-discord-bot-{random-substring}", - "default": "None" - }, - { - "name": "hf_token", - "annotation": "str | None", - "doc": "HF api token with write priviledges in order to upload the files to HF space. Can be ommitted if logged in via the HuggingFace CLI, unless the upstream space is private. Obtain from: https://huggingface.co/settings/token", - "default": "None" - }, - { - "name": "private", - "annotation": "bool", - "doc": "Whether the space hosting the discord bot is private. The visibility of the discord bot itself is set via the discord website. See https://huggingface.co/spaces/freddyaboulton/test-discord-bot-v1", - "default": "False" - } - ], - "returns": {}, - "example": null, - "override_signature": null, - "parent": "gradio.Client" - } - ], - "parent": "gradio", - "prev_obj": "Python-Client", - "next_obj": "Job" - }, - "job": { - "class": null, - "name": "Job", - "description": "A Job is a wrapper over the Future class that represents a prediction call that has been submitted by the Gradio client. This class is not meant to be instantiated directly, but rather is created by the Client.submit() method.
A Job object includes methods to get the status of the prediction call, as well to get the outputs of the prediction call. Job objects are also iterable, and can be used in a loop to get the outputs of prediction calls as they become available for generator endpoints.", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "future", - "annotation": "Future", - "doc": "The future object that represents the prediction call, created by the Client.submit() method" - }, - { - "name": "communicator", - "annotation": "Communicator | None", - "doc": "The communicator object that is used to communicate between the client and the background thread running the job", - "default": "None" - }, - { - "name": "verbose", - "annotation": "bool", - "doc": "Whether to print any status-related messages to the console", - "default": "True" - }, - { - "name": "space_id", - "annotation": "str | None", - "doc": "The space ID corresponding to the Client object that created this Job object", - "default": "None" - } - ], - "returns": { "annotation": null }, - "example": null, - "fns": [ - { - "fn": null, - "name": "result", - "description": "Return the result of the call that the future represents. Raises CancelledError: If the future was cancelled, TimeoutError: If the future didn't finish executing before the given timeout, and Exception: If the call raised then that exception will be raised.
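Because `Job` objects are iterable (as the description above notes), outputs from a generator endpoint can be consumed in a loop; a hedged sketch with an illustrative Space and endpoint name:

```python
from gradio_client import Client

client = Client("gradio/count_generator")   # illustrative Space
job = client.submit(3, api_name="/count")   # returns a Job immediately

# Iterating yields outputs as the generator endpoint produces them.
for output in job:
    print(output)

print(job.result())  # blocking call for the final value
```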
", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - }, - { - "name": "timeout", - "annotation": "float | None", - "doc": "The number of seconds to wait for the result if the future isn't done. If None, then there is no limit on the wait time.", - "default": "None" - } - ], - "returns": { - "annotation": "Any", - "doc": "The result of the call that the future represents." - }, - "example": "from gradio_client import Client\ncalculator = Client(src=\"gradio/calculator\")\njob = calculator.submit(\"foo\", \"add\", 4, fn_index=0)\njob.result(timeout=5)\n>> 9", - "override_signature": null, - "parent": "gradio.Job" - }, - { - "fn": null, - "name": "outputs", - "description": "Returns a list containing the latest outputs from the Job.
If the endpoint has multiple output components, each element of the list will be a tuple of results. Otherwise, the list will contain the results directly, without wrapping them in tuples.
For endpoints that are queued, this list will contain the final job output even if that endpoint does not use a generator function.
", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - } - ], - "returns": {}, - "example": "from gradio_client import Client\nclient = Client(src=\"gradio/count_generator\")\njob = client.submit(3, api_name=\"/count\")\nwhile not job.done():\n time.sleep(0.1)\njob.outputs()\n>> ['0', '1', '2']", - "override_signature": null, - "parent": "gradio.Job" - }, - { - "fn": null, - "name": "status", - "description": "Returns the latest status update from the Job in the form of a StatusUpdate object, which contains the following fields: code, rank, queue_size, success, time, eta, and progress_data.
progress_data is a list of updates emitted by the gr.Progress() tracker of the event handler. Each element of the list has the following fields: index, length, unit, progress, desc. If the event handler does not have a gr.Progress() tracker, the progress_data field will be None.
", - "tags": {}, - "parameters": [ - { - "name": "self", - "annotation": "", - "doc": null - } - ], - "returns": {}, - "example": "from gradio_client import Client\nclient = Client(src=\"gradio/calculator\")\njob = client.submit(5, \"add\", 4, api_name=\"/predict\")\njob.status()\n>> \njob.status().eta\n>> 43.241 # seconds", - "override_signature": null, - "parent": "gradio.Job" - } - ], - "parent": "gradio", - "prev_obj": "Client", - "next_obj": "JS-Client" - } - }, - "events_matrix": { - "AnnotatedImage": ["select"], - "Audio": [ - "change", - "clear", - "play", - "pause", - "stop", - "end", - "stream", - "start_recording", - "stop_recording", - "upload" - ], - "BarPlot": ["change", "clear"], - "Button": ["click"], - "Chatbot": ["change", "select"], - "Checkbox": ["change", "input", "select"], - "CheckboxGroup": ["change", "input", "select"], - "ClearButton": ["click"], - "Code": ["change", "input"], - "ColorPicker": ["change", "input", "submit", "focus", "blur"], - "Dataframe": ["change", "input", "select"], - "Dataset": ["click", "select"], - "Dropdown": ["change", "input", "focus", "blur", "select"], - "DuplicateButton": ["click"], - "File": ["change", "clear", "upload", "select"], - "Gallery": ["select"], - "HTML": ["change"], - "HighlightedText": ["change", "select"], - "Image": ["change", "edit", "clear", "stream", "upload", "select"], - "Interpretation": [], - "JSON": ["change"], - "Label": ["change", "select"], - "LinePlot": ["change", "clear"], - "Markdown": ["change"], - "Model3D": ["change", "edit", "clear", "upload"], - "Number": ["change", "input", "submit", "focus", "blur"], - "Plot": ["change", "clear"], - "Radio": ["change", "input", "select"], - "ScatterPlot": ["change", "clear"], - "Slider": ["change", "input", "release"], - "State": [], - "Textbox": ["change", "input", "submit", "focus", "blur", "select"], - "Timeseries": ["change"], - "UploadButton": ["click", "upload"], - "Video": [ - "change", - "clear", - "play", - "pause", - "stop", - "end", - "start_recording", - "stop_recording", - "upload" - ] - } - }, - "pages": [ - "client", - "job", - "error", - "load", - "examples", - "progress", - "update", - "make_waveform", - "eventdata", - "warning", - "info", - "simplecsvlogger", - "csvlogger", - "huggingfacedatasetsaver", - "request", - "mount_gradio_app", - "base", - "queue", - "blocks", - "changeable", - "inputable", - "clickable", - "submittable", - "editable", - "clearable", - "playable", - "streamable", - "recordable", - "focusable", - "uploadable", - "releaseable", - "selectable", - "row", - "column", - "tab", - "group", - "box", - "accordion", - "annotatedimage", - "audio", - "barplot", - "button", - "chatbot", - "checkbox", - "checkboxgroup", - "clearbutton", - "code", - "colorpicker", - "dataframe", - "dataset", - "dropdown", - "duplicatebutton", - "file", - "gallery", - "html", - "highlightedtext", - "image", - "interpretation", - "json", - "label", - "lineplot", - "markdown", - "model3d", - "number", - "plot", - "radio", - "scatterplot", - "slider", - "state", - "textbox", - "timeseries", - "uploadbutton", - "video", - "chatinterface", - "interface", - "tabbedinterface", - "parallel", - "series" - ], - "js_client": "## JavaScript Client Library\n\nA javascript (and typescript) client to call Gradio APIs.\n\n## Installation\n\nThe Gradio JavaScript client is available on npm as `@gradio/client`. 
You can install it as below:\n\n```sh\nnpm i @gradio/client\n```\n\n## Usage\n\nThe JavaScript Gradio Client exposes two named imports, `client` and `duplicate`.\n\n### `client`\n\nThe client function connects to the API of a hosted Gradio space and returns an object that allows you to make calls to that API.\n\nThe simplest example looks like this:\n\n```ts\nimport { client } from \"@gradio/client\";\n\nconst app = await client(\"user/space-name\");\nconst result = await app.predict(\"/predict\");\n```\n\nThis function accepts two arguments: `source` and `options`:\n\n#### `source`\n\nThis is the url or name of the gradio app whose API you wish to connect to. This parameter is required and should always be a string. For example:\n\n```ts\nclient(\"user/space-name\");\n```\n\n#### `options`\n\nThe options object can optionally be passed a second parameter. This object has two properties, `hf_token` and `status_callback`.\n\n##### `hf_token`\n\nThis should be a Hugging Face personal access token and is required if you wish to make calls to a private gradio api. This option is optional and should be a string starting with `\"hf_\"`.\n\nExample:\n\n```ts\nimport { client } from \"@gradio/client\";\n\nconst app = await client(\"user/space-name\", { hf_token: \"hf_...\" });\n```\n\n##### `status_callback`\n\nThis should be a function which will notify your of the status of a space if it is not running. If the gradio API you are connecting to is awake and running or is not hosted on Hugging Face space then this function will do nothing.\n\n**Additional context**\n\nApplications hosted on Hugging Face spaces can be in a number of different states. As spaces are a GitOps tool and will rebuild when new changes are pushed to the repository, they have various building, running and error states. If a space is not 'running' then the function passed as the `status_callback` will notify you of the current state of the space and the status of the space as it changes. Spaces that are building or sleeping can take longer than usual to respond, so you can use this information to give users feedback about the progress of their action.\n\n```ts\nimport { client, type SpaceStatus } from \"@gradio/client\";\n\nconst app = await client(\"user/space-name\", {\n // The space_status parameter does not need to be manually annotated, this is just for illustration.\n space_status: (space_status: SpaceStatus) => console.log(space_status),\n});\n```\n\n```ts\ninterface SpaceStatusNormal {\n\tstatus: \"sleeping\" | \"running\" | \"building\" | \"error\" | \"stopped\";\n\tdetail:\n\t\t| \"SLEEPING\"\n\t\t| \"RUNNING\"\n\t\t| \"RUNNING_BUILDING\"\n\t\t| \"BUILDING\"\n\t\t| \"NOT_FOUND\";\n\tload_status: \"pending\" | \"error\" | \"complete\" | \"generating\";\n\tmessage: string;\n}\n\ninterface SpaceStatusError {\n\tstatus: \"space_error\";\n\tdetail: \"NO_APP_FILE\" | \"CONFIG_ERROR\" | \"BUILD_ERROR\" | \"RUNTIME_ERROR\";\n\tload_status: \"error\";\n\tmessage: string;\n\tdiscussions_enabled: boolean;\n\ntype SpaceStatus = SpaceStatusNormal | SpaceStatusError;\n```\n\nThe gradio client returns an object with a number of methods and properties:\n\n#### `predict`\n\nThe `predict` method allows you to call an api endpoint and get a prediction result:\n\n```ts\nimport { client } from \"@gradio/client\";\n\nconst app = await client(\"user/space-name\");\nconst result = await app.predict(\"/predict\");\n```\n\n`predict` accepts two parameters, `endpoint` and `payload`. 
It returns a promise that resolves to the prediction result.\n\n##### `endpoint`\n\nThis is the endpoint for an api request and is required. The default endpoint for a `gradio.Interface` is `\"/predict\"`. Explicitly named endpoints have a custom name. The endpoint names can be found on the \"View API\" page of a space.\n\n```ts\nimport { client } from \"@gradio/client\";\n\nconst app = await client(\"user/space-name\");\nconst result = await app.predict(\"/predict\");\n```\n\n##### `payload`\n\nThe `payload` argument is generally optional but this depends on the API itself. If the API endpoint depends on values being passed in then it is required for the API request to succeed. The data that should be passed in is detailed on the \"View API\" page of a space, or accessible via the `view_api()` method of the client.\n\n```ts\nimport { client } from \"@gradio/client\";\n\nconst app = await client(\"user/space-name\");\nconst result = await app.predict(\"/predict\", [1, \"Hello\", \"friends\"]);\n```\n\n#### `submit`\n\nThe `submit` method provides a more flexible way to call an API endpoint, providing you with status updates about the current progress of the prediction as well as supporting more complex endpoint types.\n\n```ts\nimport { client } from \"@gradio/client\";\n\nconst app = await client(\"user/space-name\");\nconst submission = app.submit(\"/predict\", payload);\n```\n\nThe `submit` method accepts the same [`endpoint`](#endpoint) and [`payload`](#payload) arguments as `predict`.\n\nThe `submit` method does not return a promise and should not be awaited, instead it returns an object with a `on`, `off`, and `cancel` methods.\n\n##### `on`\n\nThe `on` method allows you to subscribe to events related to the submitted API request. There are two types of event that can be subscribed to: `\"data\"` updates and `\"status\"` updates.\n\n`\"data\"` updates are issued when the API computes a value, the callback provided as the second argument will be called when such a value is sent to the client. The shape of the data depends on the way the API itself is constructed. This event may fire more than once if that endpoint supports emmitting new values over time.\n\n`\"status` updates are issued when the status of a request changes. 
This information allows you to offer feedback to users when the queue position of the request changes, or when the request changes from queued to processing.\n\nThe status payload look like this:\n\n```ts\ninterface Status {\n queue: boolean;\n code?: string;\n success?: boolean;\n stage: \"pending\" | \"error\" | \"complete\" | \"generating\";\n size?: number;\n position?: number;\n eta?: number;\n message?: string;\n progress_data?: Array<{\n progress: number | null;\n index: number | null;\n length: number | null;\n unit: string | null;\n desc: string | null;\n }>;\n time?: Date;\n}\n```\n\nUsage of these subscribe callback looks like this:\n\n```ts\nimport { client } from \"@gradio/client\";\n\nconst app = await client(\"user/space-name\");\nconst submission = app\n .submit(\"/predict\", payload)\n .on(\"data\", (data) => console.log(data))\n .on(\"status\", (status: Status) => console.log(status));\n```\n\n##### `off`\n\nThe `off` method unsubscribes from a specific event of the submitted job and works similarly to `document.removeEventListener`; both the event name and the original callback must be passed in to successfully unsubscribe:\n\n```ts\nimport { client } from \"@gradio/client\";\n\nconst app = await client(\"user/space-name\");\nconst handle_data = (data) => console.log(data);\n\nconst submission = app.submit(\"/predict\", payload).on(\"data\", handle_data);\n\n// later\nsubmission.off(\"/predict\", handle_data);\n```\n\n##### `destroy`\n\nThe `destroy` method will remove all subscriptions to a job, regardless of whether or not they are `\"data\"` or `\"status\"` events. This is a convenience method for when you do not want to unsubscribe use the `off` method.\n\n```js\nimport { client } from \"@gradio/client\";\n\nconst app = await client(\"user/space-name\");\nconst handle_data = (data) => console.log(data);\n\nconst submission = app.submit(\"/predict\", payload).on(\"data\", handle_data);\n\n// later\nsubmission.destroy();\n```\n\n##### `cancel`\n\nCertain types of gradio function can run repeatedly and in some cases indefinitely. the `cancel` method will stop such an endpoints and prevent the API from issuing additional updates.\n\n```ts\nimport { client } from \"@gradio/client\";\n\nconst app = await client(\"user/space-name\");\nconst submission = app\n .submit(\"/predict\", payload)\n .on(\"data\", (data) => console.log(data));\n\n// later\n\nsubmission.cancel();\n```\n\n#### `view_api`\n\nThe `view_api` method provides details about the API you are connected to. It returns a JavaScript object of all named endpoints, unnamed endpoints and what values they accept and return. This method does not accept arguments.\n\n```ts\nimport { client } from \"@gradio/client\";\n\nconst app = await client(\"user/space-name\");\nconst api_info = await app.view_api();\n\nconsole.log(api_info);\n```\n\n#### `config`\n\nThe `config` property contains the configuration for the gradio application you are connected to. This object may contain useful meta information about the application.\n\n```ts\nimport { client } from \"@gradio/client\";\n\nconst app = await client(\"user/space-name\");\nconsole.log(app.config);\n```\n\n### `duplicate`\n\nThe duplicate function will attempt to duplicate the space that is referenced and return an instance of `client` connected to that space. If the space has already been duplicated then it will not create a new duplicate and will instead connect to the existing duplicated space. 
The huggingface token that is passed in will dictate the user under which the space is created.\n\n`duplicate` accepts the same arguments as `client` with the addition of a `private` options property dictating whether the duplicated space should be private or public. A huggingface token is required for duplication to work.\n\n```ts\nimport { duplicate } from \"@gradio/client\";\n\nconst app = await duplicate(\"user/space-name\", {\n hf_token: \"hf_...\",\n});\n```\n\nThis function accepts two arguments: `source` and `options`:\n\n#### `source`\n\nThe space to duplicate and connect to. [See `client`'s `source` parameter](#source).\n\n#### `options`\n\nAccepts all options that `client` accepts, except `hf_token` is required. [See `client`'s `options` parameter](#source).\n\n`duplicate` also accepts one additional `options` property.\n\n##### `private`\n\nThis is an optional property specific to `duplicate`'s options object and will determine whether the space should be public or private. Spaces duplicated via the `duplicate` method are public by default.\n\n```ts\nimport { duplicate } from \"@gradio/client\";\n\nconst app = await duplicate(\"user/space-name\", {\n hf_token: \"hf_...\",\n private: true,\n});\n```\n\n##### `timeout`\n\nThis is an optional property specific to `duplicate`'s options object and will set the timeout in minutes before the duplicated space will go to sleep.\n\n```ts\nimport { duplicate } from \"@gradio/client\";\n\nconst app = await duplicate(\"user/space-name\", {\n hf_token: \"hf_...\",\n private: true,\n timeout: 5,\n});\n```\n\n##### `hardware`\n\nThis is an optional property specific to `duplicate`'s options object and will set the hardware for the duplicated space. By default the hardware used will match that of the original space. If this cannot be obtained it will default to `\"cpu-basic\"`. For hardware upgrades (beyond the basic CPU tier), you may be required to provide [billing information on Hugging Face](https://huggingface.co/settings/billing).\n\nPossible hardware options are:\n\n- `\"cpu-basic\"`\n- `\"cpu-upgrade\"`\n- `\"t4-small\"`\n- `\"t4-medium\"`\n- `\"a10g-small\"`\n- `\"a10g-large\"`\n- `\"a100-large\"`\n\n```ts\nimport { duplicate } from \"@gradio/client\";\n\nconst app = await duplicate(\"user/space-name\", {\n hf_token: \"hf_...\",\n private: true,\n hardware: \"a10g-small\",\n});\n```\n" -} +{"docs": {"building": {"simplecsvlogger": {"class": null, "name": "SimpleCSVLogger", "description": "A simplified implementation of the FlaggingCallback abstract class provided for illustrative purposes. Each flagged sample (both the input and output data) is logged to a CSV file on the machine running the gradio app.", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}], "returns": {"annotation": null}, "example": "import gradio as gr\ndef image_classifier(inp):\n return {'cat': 0.3, 'dog': 0.7}\ndemo = gr.Interface(fn=image_classifier, inputs=\"image\", outputs=\"label\",\n flagging_callback=SimpleCSVLogger())", "fns": [], "parent": "gradio"}, "csvlogger": {"class": null, "name": "CSVLogger", "description": "The default implementation of the FlaggingCallback abstract class. 
Each flagged sample (both the input and output data) is logged to a CSV file with headers on the machine running the gradio app.", "tags": {"guides": "using-flagging"}, "parameters": [{"name": "self", "annotation": "", "doc": null}], "returns": {"annotation": null}, "example": "import gradio as gr\ndef image_classifier(inp):\n return {'cat': 0.3, 'dog': 0.7}\ndemo = gr.Interface(fn=image_classifier, inputs=\"image\", outputs=\"label\",\n flagging_callback=CSVLogger())", "fns": [], "guides": [{"name": "using-flagging", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 45, "pretty_name": "Using Flagging", "content": "# Using Flagging\n\n\n\n\n## Introduction\n\nWhen you demo a machine learning model, you might want to collect data from users who try the model, particularly data points in which the model is not behaving as expected. Capturing these \"hard\" data points is valuable because it allows you to improve your machine learning model and make it more reliable and robust.\n\nGradio simplifies the collection of this data by including a **Flag** button with every `Interface`. This allows a user or tester to easily send data back to the machine where the demo is running. In this Guide, we discuss more about how to use the flagging feature, both with `gradio.Interface` as well as with `gradio.Blocks`.\n\n## The **Flag** button in `gradio.Interface`\n\nFlagging with Gradio's `Interface` is especially easy. By default, underneath the output components, there is a button marked **Flag**. When a user testing your model sees input with interesting output, they can click the flag button to send the input and output data back to the machine where the demo is running. The sample is saved to a CSV log file (by default). If the demo involves images, audio, video, or other types of files, these are saved separately in a parallel directory and the paths to these files are saved in the CSV file.\n\nThere are [four parameters](https://gradio.app/docs/#interface-header) in `gradio.Interface` that control how flagging works. We will go over them in greater detail.\n\n* `allow_flagging`: this parameter can be set to either `\"manual\"` (default), `\"auto\"`, or `\"never\"`. \n * `manual`: users will see a button to flag, and samples are only flagged when the button is clicked.\n * `auto`: users will not see a button to flag, but every sample will be flagged automatically. \n * `never`: users will not see a button to flag, and no sample will be flagged. \n* `flagging_options`: this parameter can be either `None` (default) or a list of strings.\n * If `None`, then the user simply clicks on the **Flag** button and no additional options are shown.\n * If a list of strings are provided, then the user sees several buttons, corresponding to each of the strings that are provided. For example, if the value of this parameter is `[\"Incorrect\", \"Ambiguous\"]`, then buttons labeled **Flag as Incorrect** and **Flag as Ambiguous** appear. 
This only applies if `allow_flagging` is `\"manual\"`.\n * The chosen option is then logged along with the input and output.\n* `flagging_dir`: this parameter takes a string.\n * It represents what to name the directory where flagged data is stored.\n* `flagging_callback`: this parameter takes an instance of a subclass of the `FlaggingCallback` class\n * Using this parameter allows you to write custom code that gets run when the flag button is clicked\n * By default, this is set to an instance of `gr.CSVLogger`\n * One example is setting it to an instance of `gr.HuggingFaceDatasetSaver` which can allow you to pipe any flagged data into a HuggingFace Dataset. (See more below.)\n\n## What happens to flagged data?\n\nWithin the directory provided by the `flagging_dir` argument, a CSV file will log the flagged data. \n\nHere's an example: The code below creates the calculator interface embedded below it:\n\n```python\nimport gradio as gr\n\n\ndef calculator(num1, operation, num2):\n if operation == \"add\":\n return num1 + num2\n elif operation == \"subtract\":\n return num1 - num2\n elif operation == \"multiply\":\n return num1 * num2\n elif operation == \"divide\":\n return num1 / num2\n\n\niface = gr.Interface(\n calculator,\n [\"number\", gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]), \"number\"],\n \"number\",\n allow_flagging=\"manual\"\n)\n\niface.launch()\n```\n\n\n\nWhen you click the flag button above, the directory where the interface was launched will include a new flagged subfolder, with a csv file inside it. This csv file includes all the data that was flagged. \n\n```directory\n+-- flagged/\n| +-- logs.csv\n```\n_flagged/logs.csv_\n```csv\nnum1,operation,num2,Output,timestamp\n5,add,7,12,2022-01-31 11:40:51.093412\n6,subtract,1.5,4.5,2022-01-31 03:25:32.023542\n```\n\nIf the interface involves file data, such as for Image and Audio components, folders will be created to store those flagged data as well. For example an `image` input to `image` output interface will create the following structure.\n\n```directory\n+-- flagged/\n| +-- logs.csv\n| +-- image/\n| | +-- 0.png\n| | +-- 1.png\n| +-- Output/\n| | +-- 0.png\n| | +-- 1.png\n```\n_flagged/logs.csv_\n```csv\nim,Output timestamp\nim/0.png,Output/0.png,2022-02-04 19:49:58.026963\nim/1.png,Output/1.png,2022-02-02 10:40:51.093412\n```\n\nIf you wish for the user to provide a reason for flagging, you can pass a list of strings to the `flagging_options` argument of Interface. Users will have to select one of these choices when flagging, and the option will be saved as an additional column to the CSV.\n\nIf we go back to the calculator example, the following code will create the interface embedded below it. \n```python\niface = gr.Interface(\n calculator,\n [\"number\", gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]), \"number\"],\n \"number\",\n allow_flagging=\"manual\",\n flagging_options=[\"wrong sign\", \"off by one\", \"other\"]\n)\n\niface.launch()\n```\n\n\nWhen users click the flag button, the csv file will now include a column indicating the selected option.\n\n_flagged/logs.csv_\n```csv\nnum1,operation,num2,Output,flag,timestamp\n5,add,7,-12,wrong sign,2022-02-04 11:40:51.093412\n6,subtract,1.5,3.5,off by one,2022-02-04 11:42:32.062512\n```\n\n## The HuggingFaceDatasetSaver Callback\n\nSometimes, saving the data to a local CSV file doesn't make sense. For example, on Hugging Face\nSpaces, developers typically don't have access to the underlying ephemeral machine hosting the Gradio\ndemo. 
That's why, by default, flagging is turned off in Hugging Face Space. However,\nyou may want to do something else with the flagged data.\n\nWe've made this super easy with the `flagging_callback` parameter.\n\nFor example, below we're going to pipe flagged data from our calculator example into a Hugging Face Dataset, e.g. so that we can build a \"crowd-sourced\" dataset:\n\n\n```python\nimport os\n\nHF_TOKEN = os.getenv('HF_TOKEN')\nhf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, \"crowdsourced-calculator-demo\")\n\niface = gr.Interface(\n calculator,\n [\"number\", gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]), \"number\"],\n \"number\",\n description=\"Check out the crowd-sourced dataset at: [https://huggingface.co/datasets/aliabd/crowdsourced-calculator-demo](https://huggingface.co/datasets/aliabd/crowdsourced-calculator-demo)\",\n allow_flagging=\"manual\",\n flagging_options=[\"wrong sign\", \"off by one\", \"other\"],\n flagging_callback=hf_writer\n)\n\niface.launch()\n```\n\nNotice that we define our own \ninstance of `gradio.HuggingFaceDatasetSaver` using our Hugging Face token and\nthe name of a dataset we'd like to save samples to. In addition, we also set `allow_flagging=\"manual\"`\nbecause on Hugging Face Spaces, `allow_flagging` is set to `\"never\"` by default. Here's our demo:\n\n\n\nYou can now see all the examples flagged above in this [public Hugging Face dataset](https://huggingface.co/datasets/aliabd/crowdsourced-calculator-demo).\n\n![flagging callback hf](https://github.com/gradio-app/gradio/blob/main/guides/assets/flagging-callback-hf.png?raw=true)\n\nWe created the `gradio.HuggingFaceDatasetSaver` class, but you can pass your own custom class as long as it inherits from `FLaggingCallback` defined in [this file](https://github.com/gradio-app/gradio/blob/master/gradio/flagging.py). If you create a cool callback, contribute it to the repo! \n\n## Flagging with Blocks\n\nWhat about if you are using `gradio.Blocks`? On one hand, you have even more flexibility\nwith Blocks -- you can write whatever Python code you want to run when a button is clicked,\nand assign that using the built-in events in Blocks.\n\nAt the same time, you might want to use an existing `FlaggingCallback` to avoid writing extra code.\nThis requires two steps:\n\n1. You have to run your callback's `.setup()` somewhere in the code prior to the \nfirst time you flag data\n2. When the flagging button is clicked, then you trigger the callback's `.flag()` method,\nmaking sure to collect the arguments correctly and disabling the typical preprocessing. 
\n\nHere is an example with an image sepia filter Blocks demo that lets you flag\ndata using the default `CSVLogger`:\n\n```python\nimport numpy as np\nimport gradio as gr\n\ndef sepia(input_img, strength):\n sepia_filter = strength * np.array(\n [[0.393, 0.769, 0.189], [0.349, 0.686, 0.168], [0.272, 0.534, 0.131]]\n ) + (1-strength) * np.identity(3)\n sepia_img = input_img.dot(sepia_filter.T)\n sepia_img /= sepia_img.max()\n return sepia_img\n\ncallback = gr.CSVLogger()\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n img_input = gr.Image()\n strength = gr.Slider(0, 1, 0.5)\n img_output = gr.Image()\n with gr.Row():\n btn = gr.Button(\"Flag\")\n \n # This needs to be called at some point prior to the first call to callback.flag()\n callback.setup([img_input, strength, img_output], \"flagged_data_points\")\n\n img_input.change(sepia, [img_input, strength], img_output)\n strength.change(sepia, [img_input, strength], img_output)\n \n # We can choose which components to flag -- in this case, we'll flag all of them\n btn.click(lambda *args: callback.flag(args), [img_input, strength, img_output], None, preprocess=False)\n\ndemo.launch()\n\n```\n\n\n## Privacy\n\nImportant Note: please make sure your users understand when the data they submit is being saved, and what you plan on doing with it. This is especially important when you use `allow_flagging=auto` (when all of the data submitted through the demo is being flagged)\n\n### That's all! Happy building :) \n", "html": "

Using Flagging

\n\n

Introduction

\n\n

When you demo a machine learning model, you might want to collect data from users who try the model, particularly data points in which the model is not behaving as expected. Capturing these \"hard\" data points is valuable because it allows you to improve your machine learning model and make it more reliable and robust.

\n\n

Gradio simplifies the collection of this data by including a Flag button with every Interface. This allows a user or tester to easily send data back to the machine where the demo is running. In this Guide, we discuss more about how to use the flagging feature, both with gradio.Interface as well as with gradio.Blocks.

\n\n

The Flag button in gradio.Interface

\n\n

Flagging with Gradio's Interface is especially easy. By default, underneath the output components, there is a button marked Flag. When a user testing your model sees input with interesting output, they can click the flag button to send the input and output data back to the machine where the demo is running. The sample is saved to a CSV log file (by default). If the demo involves images, audio, video, or other types of files, these are saved separately in a parallel directory and the paths to these files are saved in the CSV file.

\n\n

There are four parameters in gradio.Interface that control how flagging works. We will go over them in greater detail.

\n\n
    \n
  • allow_flagging: this parameter can be set to either \"manual\" (default), \"auto\", or \"never\".
    \n
      \n
    • manual: users will see a button to flag, and samples are only flagged when the button is clicked.
    • \n
    • auto: users will not see a button to flag, but every sample will be flagged automatically.
    • \n
    • never: users will not see a button to flag, and no sample will be flagged.
    • \n
  • \n
  • flagging_options: this parameter can be either None (default) or a list of strings.\n
      \n
    • If None, then the user simply clicks on the Flag button and no additional options are shown.
    • \n
    • If a list of strings are provided, then the user sees several buttons, corresponding to each of the strings that are provided. For example, if the value of this parameter is [\"Incorrect\", \"Ambiguous\"], then buttons labeled Flag as Incorrect and Flag as Ambiguous appear. This only applies if allow_flagging is \"manual\".
    • \n
    • The chosen option is then logged along with the input and output.
    • \n
  • \n
  • flagging_dir: this parameter takes a string.\n
      \n
    • It represents what to name the directory where flagged data is stored.
    • \n
  • \n
  • flagging_callback: this parameter takes an instance of a subclass of the FlaggingCallback class\n
      \n
    • Using this parameter allows you to write custom code that gets run when the flag button is clicked
    • \n
    • By default, this is set to an instance of gr.CSVLogger
    • \n
    • One example is setting it to an instance of gr.HuggingFaceDatasetSaver which can allow you to pipe any flagged data into a HuggingFace Dataset. (See more below.)
    • \n
  • \n
\n\n
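
Putting these four parameters together, a minimal sketch might look like the following (the reverse function and the my_flags directory are made-up names used only for illustration):

\n\n
import gradio as gr\n\ndef reverse(text):\n    return text[::-1]\n\ndemo = gr.Interface(\n    reverse,\n    gr.Textbox(),\n    gr.Textbox(),\n    allow_flagging=\"manual\",                      # show a Flag button; flag only when it is clicked\n    flagging_options=[\"Incorrect\", \"Ambiguous\"],  # extra buttons, logged alongside the sample\n    flagging_dir=\"my_flags\",                      # where the CSV (and any flagged files) are written\n    flagging_callback=gr.CSVLogger(),             # the default; swap in another FlaggingCallback subclass if needed\n)\n\ndemo.launch()\n
\n\n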

What happens to flagged data?

\n\n

Within the directory provided by the flagging_dir argument, a CSV file will log the flagged data.

\n\n

Here's an example: The code below creates the calculator interface embedded below it:

\n\n
import gradio as gr\n\n\ndef calculator(num1, operation, num2):\n    if operation == \"add\":\n        return num1 + num2\n    elif operation == \"subtract\":\n        return num1 - num2\n    elif operation == \"multiply\":\n        return num1 * num2\n    elif operation == \"divide\":\n        return num1 / num2\n\n\niface = gr.Interface(\n    calculator,\n    [\"number\", gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]), \"number\"],\n    \"number\",\n    allow_flagging=\"manual\"\n)\n\niface.launch()\n
\n\n

\n\n

When you click the flag button above, the directory where the interface was launched will include a new flagged subfolder, with a csv file inside it. This csv file includes all the data that was flagged.

\n\n
+-- flagged/\n|   +-- logs.csv\n
\n\n

flagged/logs.csv

\n\n
num1,operation,num2,Output,timestamp\n5,add,7,12,2022-01-31 11:40:51.093412\n6,subtract,1.5,4.5,2022-01-31 03:25:32.023542\n
\n\n
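
Since this is just a plain CSV file, you can read the flagged samples back with Python's standard csv module (a small sketch; the flagged/logs.csv path assumes the default flagging_dir):

\n\n
import csv\n\nwith open(\"flagged/logs.csv\", newline=\"\") as f:\n    for row in csv.DictReader(f):\n        print(row[\"num1\"], row[\"operation\"], row[\"num2\"], \"->\", row[\"Output\"])\n
\n\n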

If the interface involves file data, such as for Image and Audio components, folders will be created to store that flagged data as well. For example, an image input to image output interface will create the following structure.

\n\n
+-- flagged/\n|   +-- logs.csv\n|   +-- image/\n|   |   +-- 0.png\n|   |   +-- 1.png\n|   +-- Output/\n|   |   +-- 0.png\n|   |   +-- 1.png\n
\n\n

flagged/logs.csv

\n\n
im,Output,timestamp\nim/0.png,Output/0.png,2022-02-04 19:49:58.026963\nim/1.png,Output/1.png,2022-02-02 10:40:51.093412\n
\n\n

If you wish for the user to provide a reason for flagging, you can pass a list of strings to the flagging_options argument of Interface. Users will have to select one of these choices when flagging, and the option will be saved as an additional column to the CSV.

\n\n

If we go back to the calculator example, the following code will create the interface embedded below it.

\n\n
iface = gr.Interface(\n    calculator,\n    [\"number\", gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]), \"number\"],\n    \"number\",\n    allow_flagging=\"manual\",\n    flagging_options=[\"wrong sign\", \"off by one\", \"other\"]\n)\n\niface.launch()\n
\n\n

\n\n

When users click the flag button, the csv file will now include a column indicating the selected option.

\n\n

flagged/logs.csv

\n\n
num1,operation,num2,Output,flag,timestamp\n5,add,7,-12,wrong sign,2022-02-04 11:40:51.093412\n6,subtract,1.5,3.5,off by one,2022-02-04 11:42:32.062512\n
\n\n

The HuggingFaceDatasetSaver Callback

\n\n

Sometimes, saving the data to a local CSV file doesn't make sense. For example, on Hugging Face\nSpaces, developers typically don't have access to the underlying ephemeral machine hosting the Gradio\ndemo. That's why, by default, flagging is turned off in Hugging Face Spaces. However,\nyou may want to do something else with the flagged data.

\n\n

We've made this super easy with the flagging_callback parameter.

\n\n

For example, below we're going to pipe flagged data from our calculator example into a Hugging Face Dataset, e.g. so that we can build a \"crowd-sourced\" dataset:

\n\n
import os\n\nHF_TOKEN = os.getenv('HF_TOKEN')\nhf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, \"crowdsourced-calculator-demo\")\n\niface = gr.Interface(\n    calculator,\n    [\"number\", gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]), \"number\"],\n    \"number\",\n    description=\"Check out the crowd-sourced dataset at: [https://huggingface.co/datasets/aliabd/crowdsourced-calculator-demo](https://huggingface.co/datasets/aliabd/crowdsourced-calculator-demo)\",\n    allow_flagging=\"manual\",\n    flagging_options=[\"wrong sign\", \"off by one\", \"other\"],\n    flagging_callback=hf_writer\n)\n\niface.launch()\n
\n\n

Notice that we define our own \ninstance of gradio.HuggingFaceDatasetSaver using our Hugging Face token and\nthe name of a dataset we'd like to save samples to. In addition, we also set allow_flagging=\"manual\"\nbecause on Hugging Face Spaces, allow_flagging is set to \"never\" by default. Here's our demo:

\n\n

\n\n

You can now see all the examples flagged above in this public Hugging Face dataset.

\n\n

[Image: flagging callback hf]

\n\n

We created the gradio.HuggingFaceDatasetSaver class, but you can pass your own custom class as long as it inherits from FlaggingCallback defined in this file. If you create a cool callback, contribute it to the repo!

\n\n
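
As a rough sketch of what such a custom callback could look like (the JSONLLogger name and the exact method signatures below are assumptions -- check gradio/flagging.py for the interface your Gradio version defines):

\n\n
import datetime\nimport json\nimport os\n\nfrom gradio.flagging import FlaggingCallback  # assumed import path\n\n\nclass JSONLLogger(FlaggingCallback):\n    # Hypothetical callback that appends each flagged sample to a JSON-lines file.\n\n    def setup(self, components, flagging_dir):\n        # Called once, before the first flag, with the components being logged.\n        self.components = components\n        os.makedirs(flagging_dir, exist_ok=True)\n        self.log_path = os.path.join(flagging_dir, \"log.jsonl\")\n\n    def flag(self, flag_data, flag_option=None, username=None):\n        # Called each time the flag button is clicked; returns the number of samples flagged.\n        record = {\n            \"data\": flag_data,\n            \"option\": flag_option,\n            \"username\": username,\n            \"timestamp\": datetime.datetime.now().isoformat(),\n        }\n        with open(self.log_path, \"a\") as f:\n            f.write(json.dumps(record, default=str) + \"\\n\")\n        return 1\n\n\n# Usage (with gradio imported as gr):\n# gr.Interface(..., allow_flagging=\"manual\", flagging_callback=JSONLLogger())\n
\n\n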

Flagging with Blocks

\n\n

What if you are using gradio.Blocks? On one hand, you have even more flexibility\nwith Blocks -- you can write whatever Python code you want to run when a button is clicked,\nand assign that using the built-in events in Blocks.

\n\n

At the same time, you might want to use an existing FlaggingCallback to avoid writing extra code.\nThis requires two steps:

\n\n
    \n
  1. You have to run your callback's .setup() somewhere in the code prior to the \nfirst time you flag data
  2. \n
  3. When the flagging button is clicked, then you trigger the callback's .flag() method,\nmaking sure to collect the arguments correctly and disabling the typical preprocessing.
  4. \n
\n\n

Here is an example with an image sepia filter Blocks demo that lets you flag\ndata using the default CSVLogger:

\n\n
import numpy as np\nimport gradio as gr\n\ndef sepia(input_img, strength):\n    sepia_filter = strength * np.array(\n        [[0.393, 0.769, 0.189], [0.349, 0.686, 0.168], [0.272, 0.534, 0.131]]\n    ) + (1-strength) * np.identity(3)\n    sepia_img = input_img.dot(sepia_filter.T)\n    sepia_img /= sepia_img.max()\n    return sepia_img\n\ncallback = gr.CSVLogger()\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        with gr.Column():\n            img_input = gr.Image()\n            strength = gr.Slider(0, 1, 0.5)\n        img_output = gr.Image()\n    with gr.Row():\n        btn = gr.Button(\"Flag\")\n\n    # This needs to be called at some point prior to the first call to callback.flag()\n    callback.setup([img_input, strength, img_output], \"flagged_data_points\")\n\n    img_input.change(sepia, [img_input, strength], img_output)\n    strength.change(sepia, [img_input, strength], img_output)\n\n    # We can choose which components to flag -- in this case, we'll flag all of them\n    btn.click(lambda *args: callback.flag(args), [img_input, strength, img_output], None, preprocess=False)\n\ndemo.launch()\n\n
\n\n

\n\n

Privacy

\n\n

Important Note: please make sure your users understand when the data they submit is being saved, and what you plan on doing with it. This is especially important when you use allow_flagging=\"auto\" (that is, when all of the data submitted through the demo is being flagged).

\n\n

That's all! Happy building :)

\n", "tags": ["FLAGGING", "DATA"], "spaces": ["https://huggingface.co/spaces/gradio/calculator-flagging-crowdsourced", "https://huggingface.co/spaces/gradio/calculator-flagging-options", "https://huggingface.co/spaces/gradio/calculator-flag-basic"], "url": "/guides/using-flagging/", "contributor": null}], "parent": "gradio"}, "huggingfacedatasetsaver": {"class": null, "name": "HuggingFaceDatasetSaver", "description": "A callback that saves each flagged sample (both the input and output data) to a HuggingFace dataset.
", "tags": {"guides": "using-flagging"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "hf_token", "annotation": "str", "doc": "The HuggingFace token to use to create (and write the flagged sample to) the HuggingFace dataset (defaults to the registered one)."}, {"name": "dataset_name", "annotation": "str", "doc": "The repo_id of the dataset to save the data to, e.g. \"image-classifier-1\" or \"username/image-classifier-1\"."}, {"name": "organization", "annotation": "str | None", "doc": "Deprecated argument. Please pass a full dataset id (e.g. 'username/dataset_name') to `dataset_name` instead.", "default": "None"}, {"name": "private", "annotation": "bool", "doc": "Whether the dataset should be private (defaults to False).", "default": "False"}, {"name": "info_filename", "annotation": "str", "doc": "The name of the file to save the dataset info (defaults to \"dataset_infos.json\").", "default": "\"dataset_info.json\""}, {"name": "separate_dirs", "annotation": "bool", "doc": "If True, each flagged item will be saved in a separate directory. This makes the flagging more robust to concurrent editing, but may be less convenient to use.", "default": "False"}, {"name": "verbose", "annotation": "bool", "doc": null, "default": "True"}], "returns": {"annotation": null}, "example": "import gradio as gr\nhf_writer = gr.HuggingFaceDatasetSaver(HF_API_TOKEN, \"image-classification-mistakes\")\ndef image_classifier(inp):\n return {'cat': 0.3, 'dog': 0.7}\ndemo = gr.Interface(fn=image_classifier, inputs=\"image\", outputs=\"label\",\n allow_flagging=\"manual\", flagging_callback=hf_writer)", "fns": [], "guides": [{"name": "using-flagging", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 45, "pretty_name": "Using Flagging", "content": "# Using Flagging\n\n\n\n\n## Introduction\n\nWhen you demo a machine learning model, you might want to collect data from users who try the model, particularly data points in which the model is not behaving as expected. Capturing these \"hard\" data points is valuable because it allows you to improve your machine learning model and make it more reliable and robust.\n\nGradio simplifies the collection of this data by including a **Flag** button with every `Interface`. This allows a user or tester to easily send data back to the machine where the demo is running. In this Guide, we discuss more about how to use the flagging feature, both with `gradio.Interface` as well as with `gradio.Blocks`.\n\n## The **Flag** button in `gradio.Interface`\n\nFlagging with Gradio's `Interface` is especially easy. By default, underneath the output components, there is a button marked **Flag**. When a user testing your model sees input with interesting output, they can click the flag button to send the input and output data back to the machine where the demo is running. The sample is saved to a CSV log file (by default). If the demo involves images, audio, video, or other types of files, these are saved separately in a parallel directory and the paths to these files are saved in the CSV file.\n\nThere are [four parameters](https://gradio.app/docs/#interface-header) in `gradio.Interface` that control how flagging works. We will go over them in greater detail.\n\n* `allow_flagging`: this parameter can be set to either `\"manual\"` (default), `\"auto\"`, or `\"never\"`. 
\n * `manual`: users will see a button to flag, and samples are only flagged when the button is clicked.\n * `auto`: users will not see a button to flag, but every sample will be flagged automatically. \n * `never`: users will not see a button to flag, and no sample will be flagged. \n* `flagging_options`: this parameter can be either `None` (default) or a list of strings.\n * If `None`, then the user simply clicks on the **Flag** button and no additional options are shown.\n * If a list of strings are provided, then the user sees several buttons, corresponding to each of the strings that are provided. For example, if the value of this parameter is `[\"Incorrect\", \"Ambiguous\"]`, then buttons labeled **Flag as Incorrect** and **Flag as Ambiguous** appear. This only applies if `allow_flagging` is `\"manual\"`.\n * The chosen option is then logged along with the input and output.\n* `flagging_dir`: this parameter takes a string.\n * It represents what to name the directory where flagged data is stored.\n* `flagging_callback`: this parameter takes an instance of a subclass of the `FlaggingCallback` class\n * Using this parameter allows you to write custom code that gets run when the flag button is clicked\n * By default, this is set to an instance of `gr.CSVLogger`\n * One example is setting it to an instance of `gr.HuggingFaceDatasetSaver` which can allow you to pipe any flagged data into a HuggingFace Dataset. (See more below.)\n\n## What happens to flagged data?\n\nWithin the directory provided by the `flagging_dir` argument, a CSV file will log the flagged data. \n\nHere's an example: The code below creates the calculator interface embedded below it:\n\n```python\nimport gradio as gr\n\n\ndef calculator(num1, operation, num2):\n if operation == \"add\":\n return num1 + num2\n elif operation == \"subtract\":\n return num1 - num2\n elif operation == \"multiply\":\n return num1 * num2\n elif operation == \"divide\":\n return num1 / num2\n\n\niface = gr.Interface(\n calculator,\n [\"number\", gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]), \"number\"],\n \"number\",\n allow_flagging=\"manual\"\n)\n\niface.launch()\n```\n\n\n\nWhen you click the flag button above, the directory where the interface was launched will include a new flagged subfolder, with a csv file inside it. This csv file includes all the data that was flagged. \n\n```directory\n+-- flagged/\n| +-- logs.csv\n```\n_flagged/logs.csv_\n```csv\nnum1,operation,num2,Output,timestamp\n5,add,7,12,2022-01-31 11:40:51.093412\n6,subtract,1.5,4.5,2022-01-31 03:25:32.023542\n```\n\nIf the interface involves file data, such as for Image and Audio components, folders will be created to store those flagged data as well. For example an `image` input to `image` output interface will create the following structure.\n\n```directory\n+-- flagged/\n| +-- logs.csv\n| +-- image/\n| | +-- 0.png\n| | +-- 1.png\n| +-- Output/\n| | +-- 0.png\n| | +-- 1.png\n```\n_flagged/logs.csv_\n```csv\nim,Output timestamp\nim/0.png,Output/0.png,2022-02-04 19:49:58.026963\nim/1.png,Output/1.png,2022-02-02 10:40:51.093412\n```\n\nIf you wish for the user to provide a reason for flagging, you can pass a list of strings to the `flagging_options` argument of Interface. Users will have to select one of these choices when flagging, and the option will be saved as an additional column to the CSV.\n\nIf we go back to the calculator example, the following code will create the interface embedded below it. 
\n```python\niface = gr.Interface(\n calculator,\n [\"number\", gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]), \"number\"],\n \"number\",\n allow_flagging=\"manual\",\n flagging_options=[\"wrong sign\", \"off by one\", \"other\"]\n)\n\niface.launch()\n```\n\n\nWhen users click the flag button, the csv file will now include a column indicating the selected option.\n\n_flagged/logs.csv_\n```csv\nnum1,operation,num2,Output,flag,timestamp\n5,add,7,-12,wrong sign,2022-02-04 11:40:51.093412\n6,subtract,1.5,3.5,off by one,2022-02-04 11:42:32.062512\n```\n\n## The HuggingFaceDatasetSaver Callback\n\nSometimes, saving the data to a local CSV file doesn't make sense. For example, on Hugging Face\nSpaces, developers typically don't have access to the underlying ephemeral machine hosting the Gradio\ndemo. That's why, by default, flagging is turned off in Hugging Face Space. However,\nyou may want to do something else with the flagged data.\n\nWe've made this super easy with the `flagging_callback` parameter.\n\nFor example, below we're going to pipe flagged data from our calculator example into a Hugging Face Dataset, e.g. so that we can build a \"crowd-sourced\" dataset:\n\n\n```python\nimport os\n\nHF_TOKEN = os.getenv('HF_TOKEN')\nhf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, \"crowdsourced-calculator-demo\")\n\niface = gr.Interface(\n calculator,\n [\"number\", gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]), \"number\"],\n \"number\",\n description=\"Check out the crowd-sourced dataset at: [https://huggingface.co/datasets/aliabd/crowdsourced-calculator-demo](https://huggingface.co/datasets/aliabd/crowdsourced-calculator-demo)\",\n allow_flagging=\"manual\",\n flagging_options=[\"wrong sign\", \"off by one\", \"other\"],\n flagging_callback=hf_writer\n)\n\niface.launch()\n```\n\nNotice that we define our own \ninstance of `gradio.HuggingFaceDatasetSaver` using our Hugging Face token and\nthe name of a dataset we'd like to save samples to. In addition, we also set `allow_flagging=\"manual\"`\nbecause on Hugging Face Spaces, `allow_flagging` is set to `\"never\"` by default. Here's our demo:\n\n\n\nYou can now see all the examples flagged above in this [public Hugging Face dataset](https://huggingface.co/datasets/aliabd/crowdsourced-calculator-demo).\n\n![flagging callback hf](https://github.com/gradio-app/gradio/blob/main/guides/assets/flagging-callback-hf.png?raw=true)\n\nWe created the `gradio.HuggingFaceDatasetSaver` class, but you can pass your own custom class as long as it inherits from `FLaggingCallback` defined in [this file](https://github.com/gradio-app/gradio/blob/master/gradio/flagging.py). If you create a cool callback, contribute it to the repo! \n\n## Flagging with Blocks\n\nWhat about if you are using `gradio.Blocks`? On one hand, you have even more flexibility\nwith Blocks -- you can write whatever Python code you want to run when a button is clicked,\nand assign that using the built-in events in Blocks.\n\nAt the same time, you might want to use an existing `FlaggingCallback` to avoid writing extra code.\nThis requires two steps:\n\n1. You have to run your callback's `.setup()` somewhere in the code prior to the \nfirst time you flag data\n2. When the flagging button is clicked, then you trigger the callback's `.flag()` method,\nmaking sure to collect the arguments correctly and disabling the typical preprocessing. 
\n\nHere is an example with an image sepia filter Blocks demo that lets you flag\ndata using the default `CSVLogger`:\n\n```python\nimport numpy as np\nimport gradio as gr\n\ndef sepia(input_img, strength):\n sepia_filter = strength * np.array(\n [[0.393, 0.769, 0.189], [0.349, 0.686, 0.168], [0.272, 0.534, 0.131]]\n ) + (1-strength) * np.identity(3)\n sepia_img = input_img.dot(sepia_filter.T)\n sepia_img /= sepia_img.max()\n return sepia_img\n\ncallback = gr.CSVLogger()\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n img_input = gr.Image()\n strength = gr.Slider(0, 1, 0.5)\n img_output = gr.Image()\n with gr.Row():\n btn = gr.Button(\"Flag\")\n \n # This needs to be called at some point prior to the first call to callback.flag()\n callback.setup([img_input, strength, img_output], \"flagged_data_points\")\n\n img_input.change(sepia, [img_input, strength], img_output)\n strength.change(sepia, [img_input, strength], img_output)\n \n # We can choose which components to flag -- in this case, we'll flag all of them\n btn.click(lambda *args: callback.flag(args), [img_input, strength, img_output], None, preprocess=False)\n\ndemo.launch()\n\n```\n\n\n## Privacy\n\nImportant Note: please make sure your users understand when the data they submit is being saved, and what you plan on doing with it. This is especially important when you use `allow_flagging=auto` (when all of the data submitted through the demo is being flagged)\n\n### That's all! Happy building :) \n", "html": "

Using Flagging

\n\n

Introduction

\n\n

When you demo a machine learning model, you might want to collect data from users who try the model, particularly data points in which the model is not behaving as expected. Capturing these \"hard\" data points is valuable because it allows you to improve your machine learning model and make it more reliable and robust.

\n\n

Gradio simplifies the collection of this data by including a Flag button with every Interface. This allows a user or tester to easily send data back to the machine where the demo is running. In this Guide, we discuss more about how to use the flagging feature, both with gradio.Interface as well as with gradio.Blocks.

\n\n

The Flag button in gradio.Interface

\n\n

Flagging with Gradio's Interface is especially easy. By default, underneath the output components, there is a button marked Flag. When a user testing your model sees input with interesting output, they can click the flag button to send the input and output data back to the machine where the demo is running. The sample is saved to a CSV log file (by default). If the demo involves images, audio, video, or other types of files, these are saved separately in a parallel directory and the paths to these files are saved in the CSV file.

\n\n

There are four parameters in gradio.Interface that control how flagging works. We will go over them in greater detail.

\n\n
    \n
  • allow_flagging: this parameter can be set to either \"manual\" (default), \"auto\", or \"never\".
    \n
      \n
    • manual: users will see a button to flag, and samples are only flagged when the button is clicked.
    • \n
    • auto: users will not see a button to flag, but every sample will be flagged automatically.
    • \n
    • never: users will not see a button to flag, and no sample will be flagged.
    • \n
  • \n
  • flagging_options: this parameter can be either None (default) or a list of strings.\n
      \n
    • If None, then the user simply clicks on the Flag button and no additional options are shown.
    • \n
    • If a list of strings is provided, then the user sees several buttons, one corresponding to each of the provided strings. For example, if the value of this parameter is [\"Incorrect\", \"Ambiguous\"], then buttons labeled Flag as Incorrect and Flag as Ambiguous appear. This only applies if allow_flagging is \"manual\".
    • \n
    • The chosen option is then logged along with the input and output.
    • \n
  • \n
  • flagging_dir: this parameter takes a string.\n
      \n
    • It represents what to name the directory where flagged data is stored.
    • \n
  • \n
  • flagging_callback: this parameter takes an instance of a subclass of the FlaggingCallback class\n
      \n
    • Using this parameter allows you to write custom code that gets run when the flag button is clicked
    • \n
    • By default, this is set to an instance of gr.CSVLogger
    • \n
    • One example is setting it to an instance of gr.HuggingFaceDatasetSaver which can allow you to pipe any flagged data into a HuggingFace Dataset. (See more below.)
    • \n
  • \n
\n\n

What happens to flagged data?

\n\n

Within the directory provided by the flagging_dir argument, a CSV file will log the flagged data.

\n\n

Here's an example: The code below creates the calculator interface embedded below it:

\n\n
import gradio as gr\n\n\ndef calculator(num1, operation, num2):\n    if operation == \"add\":\n        return num1 + num2\n    elif operation == \"subtract\":\n        return num1 - num2\n    elif operation == \"multiply\":\n        return num1 * num2\n    elif operation == \"divide\":\n        return num1 / num2\n\n\niface = gr.Interface(\n    calculator,\n    [\"number\", gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]), \"number\"],\n    \"number\",\n    allow_flagging=\"manual\"\n)\n\niface.launch()\n
\n\n

\n\n

When you click the flag button above, the directory where the interface was launched will include a new flagged subfolder, with a csv file inside it. This csv file includes all the data that was flagged.

\n\n
+-- flagged/\n|   +-- logs.csv\n
\n\n

flagged/logs.csv

\n\n
num1,operation,num2,Output,timestamp\n5,add,7,12,2022-01-31 11:40:51.093412\n6,subtract,1.5,4.5,2022-01-31 03:25:32.023542\n
\n\n

If the interface involves file data, such as for Image and Audio components, folders will be created to store that flagged data as well. For example, an image input to image output interface will create the following structure.

\n\n
+-- flagged/\n|   +-- logs.csv\n|   +-- image/\n|   |   +-- 0.png\n|   |   +-- 1.png\n|   +-- Output/\n|   |   +-- 0.png\n|   |   +-- 1.png\n
\n\n

flagged/logs.csv

\n\n
im,Output,timestamp\nim/0.png,Output/0.png,2022-02-04 19:49:58.026963\nim/1.png,Output/1.png,2022-02-02 10:40:51.093412\n
\n\n

If you wish for the user to provide a reason for flagging, you can pass a list of strings to the flagging_options argument of Interface. Users will have to select one of these choices when flagging, and the option will be saved as an additional column to the CSV.

\n\n

If we go back to the calculator example, the following code will create the interface embedded below it.

\n\n
iface = gr.Interface(\n    calculator,\n    [\"number\", gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]), \"number\"],\n    \"number\",\n    allow_flagging=\"manual\",\n    flagging_options=[\"wrong sign\", \"off by one\", \"other\"]\n)\n\niface.launch()\n
\n\n

\n\n

When users click the flag button, the csv file will now include a column indicating the selected option.

\n\n

flagged/logs.csv

\n\n
num1,operation,num2,Output,flag,timestamp\n5,add,7,-12,wrong sign,2022-02-04 11:40:51.093412\n6,subtract,1.5,3.5,off by one,2022-02-04 11:42:32.062512\n
\n\n

The HuggingFaceDatasetSaver Callback

\n\n

Sometimes, saving the data to a local CSV file doesn't make sense. For example, on Hugging Face\nSpaces, developers typically don't have access to the underlying ephemeral machine hosting the Gradio\ndemo. That's why, by default, flagging is turned off in Hugging Face Spaces. However,\nyou may want to do something else with the flagged data.

\n\n

We've made this super easy with the flagging_callback parameter.

\n\n

For example, below we're going to pipe flagged data from our calculator example into a Hugging Face Dataset, e.g. so that we can build a \"crowd-sourced\" dataset:

\n\n
import os\n\nHF_TOKEN = os.getenv('HF_TOKEN')\nhf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, \"crowdsourced-calculator-demo\")\n\niface = gr.Interface(\n    calculator,\n    [\"number\", gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]), \"number\"],\n    \"number\",\n    description=\"Check out the crowd-sourced dataset at: [https://huggingface.co/datasets/aliabd/crowdsourced-calculator-demo](https://huggingface.co/datasets/aliabd/crowdsourced-calculator-demo)\",\n    allow_flagging=\"manual\",\n    flagging_options=[\"wrong sign\", \"off by one\", \"other\"],\n    flagging_callback=hf_writer\n)\n\niface.launch()\n
\n\n

Notice that we define our own \ninstance of gradio.HuggingFaceDatasetSaver using our Hugging Face token and\nthe name of a dataset we'd like to save samples to. In addition, we also set allow_flagging=\"manual\"\nbecause on Hugging Face Spaces, allow_flagging is set to \"never\" by default. Here's our demo:

\n\n

\n\n

You can now see all the examples flagged above in this public Hugging Face dataset.

\n\n

[Image: flagging callback hf]

\n\n

We created the gradio.HuggingFaceDatasetSaver class, but you can pass your own custom class as long as it inherits from FlaggingCallback defined in this file. If you create a cool callback, contribute it to the repo!

\n\n

Flagging with Blocks

\n\n

What if you are using gradio.Blocks? On one hand, you have even more flexibility\nwith Blocks -- you can write whatever Python code you want to run when a button is clicked,\nand assign that using the built-in events in Blocks.

\n\n

At the same time, you might want to use an existing FlaggingCallback to avoid writing extra code.\nThis requires two steps:

\n\n
    \n
  1. You have to run your callback's .setup() somewhere in the code prior to the \nfirst time you flag data
  2. \n
  3. When the flagging button is clicked, then you trigger the callback's .flag() method,\nmaking sure to collect the arguments correctly and disabling the typical preprocessing.
  4. \n
\n\n

Here is an example with an image sepia filter Blocks demo that lets you flag\ndata using the default CSVLogger:

\n\n
import numpy as np\nimport gradio as gr\n\ndef sepia(input_img, strength):\n    sepia_filter = strength * np.array(\n        [[0.393, 0.769, 0.189], [0.349, 0.686, 0.168], [0.272, 0.534, 0.131]]\n    ) + (1-strength) * np.identity(3)\n    sepia_img = input_img.dot(sepia_filter.T)\n    sepia_img /= sepia_img.max()\n    return sepia_img\n\ncallback = gr.CSVLogger()\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        with gr.Column():\n            img_input = gr.Image()\n            strength = gr.Slider(0, 1, 0.5)\n        img_output = gr.Image()\n    with gr.Row():\n        btn = gr.Button(\"Flag\")\n\n    # This needs to be called at some point prior to the first call to callback.flag()\n    callback.setup([img_input, strength, img_output], \"flagged_data_points\")\n\n    img_input.change(sepia, [img_input, strength], img_output)\n    strength.change(sepia, [img_input, strength], img_output)\n\n    # We can choose which components to flag -- in this case, we'll flag all of them\n    btn.click(lambda *args: callback.flag(args), [img_input, strength, img_output], None, preprocess=False)\n\ndemo.launch()\n\n
\n\n

\n\n

Privacy

\n\n

Important Note: please make sure your users understand when the data they submit is being saved, and what you plan on doing with it. This is especially important when you use allow_flagging=\"auto\" (that is, when all of the data submitted through the demo is being flagged).

\n\n

That's all! Happy building :)

\n", "tags": ["FLAGGING", "DATA"], "spaces": ["https://huggingface.co/spaces/gradio/calculator-flagging-crowdsourced", "https://huggingface.co/spaces/gradio/calculator-flagging-options", "https://huggingface.co/spaces/gradio/calculator-flag-basic"], "url": "/guides/using-flagging/", "contributor": null}], "parent": "gradio"}, "base": {"class": null, "name": "Base", "description": "", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "primary_hue", "annotation": "colors.Color | str", "doc": "The primary hue of the theme. Load a preset, like gradio.themes.colors.green (or just the string \"green\"), or pass your own gradio.themes.utils.Color object.", "default": "Color()"}, {"name": "secondary_hue", "annotation": "colors.Color | str", "doc": "The secondary hue of the theme. Load a preset, like gradio.themes.colors.green (or just the string \"green\"), or pass your own gradio.themes.utils.Color object.", "default": "Color()"}, {"name": "neutral_hue", "annotation": "colors.Color | str", "doc": "The neutral hue of the theme, used . Load a preset, like gradio.themes.colors.green (or just the string \"green\"), or pass your own gradio.themes.utils.Color object.", "default": "Color()"}, {"name": "text_size", "annotation": "sizes.Size | str", "doc": "The size of the text. Load a preset, like gradio.themes.sizes.text_sm (or just the string \"sm\"), or pass your own gradio.themes.utils.Size object.", "default": "Size()"}, {"name": "spacing_size", "annotation": "sizes.Size | str", "doc": "The size of the spacing. Load a preset, like gradio.themes.sizes.spacing_sm (or just the string \"sm\"), or pass your own gradio.themes.utils.Size object.", "default": "Size()"}, {"name": "radius_size", "annotation": "sizes.Size | str", "doc": "The radius size of corners. Load a preset, like gradio.themes.sizes.radius_sm (or just the string \"sm\"), or pass your own gradio.themes.utils.Size object.", "default": "Size()"}, {"name": "font", "annotation": "fonts.Font | str | Iterable[fonts.Font | str]", "doc": "The primary font to use for the theme. Pass a string for a system font, or a gradio.themes.font.GoogleFont object to load a font from Google Fonts. Pass a list of fonts for fallbacks.", "default": "(, 'ui-sans-serif', 'system-ui', 'sans-serif')"}, {"name": "font_mono", "annotation": "fonts.Font | str | Iterable[fonts.Font | str]", "doc": "The monospace font to use for the theme, applies to code. Pass a string for a system font, or a gradio.themes.font.GoogleFont object to load a font from Google Fonts. Pass a list of fonts for fallbacks.", "default": "(, 'ui-monospace', 'Consolas', 'monospace')"}], "returns": {"annotation": null}, "example": "", "fns": [{"fn": null, "name": "push_to_hub", "description": "Upload a theme to the HuggingFace hub.
This requires a HuggingFace account.
", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "repo_name", "annotation": "str", "doc": "The name of the repository to store the theme assets, e.g. 'my_theme' or 'sunset'."}, {"name": "org_name", "annotation": "str | None", "doc": "The name of the org to save the space in. If None (the default), the username corresponding to the logged in user, or hf_token is used.", "default": "None"}, {"name": "version", "annotation": "str | None", "doc": "A semantic version tag for the theme. Bumping the version tag lets you publish updates to a theme without changing the look of applications that already loaded your theme.", "default": "None"}, {"name": "hf_token", "annotation": "str | None", "doc": "API token for your HuggingFace account", "default": "None"}, {"name": "theme_name", "annotation": "str | None", "doc": "Name for the theme. If None, defaults to repo_name", "default": "None"}, {"name": "description", "annotation": "str | None", "doc": "A long form description of your theme.", "default": "None"}, {"name": "private", "annotation": "bool", "doc": null, "default": "False"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Base"}, {"fn": null, "name": "from_hub", "description": "Load a theme from the hub.
This DOES NOT require a HuggingFace account for downloading publicly available themes.
", "tags": {}, "parameters": [{"name": "repo_name", "annotation": "str", "doc": "string of the form /@. If a semantic version expression is omitted, the latest version will be fetched."}, {"name": "hf_token", "annotation": "str | None", "doc": "HuggingFace Token. Only needed to download private themes.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Base"}, {"fn": null, "name": "load", "description": "Load a theme from a json file.
", "tags": {}, "parameters": [{"name": "path", "annotation": "str", "doc": "The filepath to read."}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Base"}, {"fn": null, "name": "dump", "description": "Write the theme to a json file.
", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "filename", "annotation": "str", "doc": "The path to write the theme to"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Base"}, {"fn": null, "name": "from_dict", "description": "Create a theme instance from a dictionary representation.
", "tags": {}, "parameters": [{"name": "theme", "annotation": "dict[str, dict[str, str]]", "doc": "The dictionary representation of the theme."}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Base"}, {"fn": null, "name": "to_dict", "description": "Convert the theme into a python dictionary.", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Base"}], "parent": "gradio"}, "queue": {"class": null, "name": "queue", "description": "You can control the rate of processed requests by creating a queue. This will allow you to set the number of requests to be processed at one time, and will let users know their position in the queue.", "tags": {"parameters": "concurrency_count: Number of worker threads that will be processing requests from the queue concurrently. Increasing this number will increase the rate at which requests are processed, but will also increase the memory usage of the queue.
status_update_rate: If \"auto\", Queue will send status estimations to all clients whenever a job is finished. Otherwise Queue will send status at regular intervals set by this parameter as the number of seconds.
client_position_to_load_data: DEPRECATED. This parameter is deprecated and has no effect.
default_enabled: Deprecated and has no effect.
api_open: If True, the REST routes of the backend will be open, allowing requests made directly to those endpoints to skip the queue.
max_size: The maximum number of events the queue will store at any given moment. If the queue is full, new events will not be added and a user will receive a message saying that the queue is full. If None, the queue size will be unlimited.
with gr.Blocks() as demo:
button = gr.Button(label=\"Generate Image\")
button.click(fn=image_generator, inputs=gr.Textbox(), outputs=gr.Image())
demo.queue(concurrency_count=3)
demo.launch()
demo = gr.Interface(image_generator, gr.Textbox(), gr.Image())
demo.queue(concurrency_count=3)
demo.launch()"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "concurrency_count", "annotation": "int", "doc": "Number of worker threads that will be processing requests from the queue concurrently. Increasing this number will increase the rate at which requests are processed, but will also increase the memory usage of the queue.", "default": "1"}, {"name": "status_update_rate", "annotation": "float | Literal['auto']", "doc": "If \"auto\", Queue will send status estimations to all clients whenever a job is finished. Otherwise Queue will send status at regular intervals set by this parameter as the number of seconds.", "default": "\"auto\""}, {"name": "client_position_to_load_data", "annotation": "int | None", "doc": "DEPRECATED. This parameter is deprecated and has no effect.", "default": "None"}, {"name": "default_enabled", "annotation": "bool | None", "doc": "Deprecated and has no effect.", "default": "None"}, {"name": "api_open", "annotation": "bool", "doc": "If True, the REST routes of the backend will be open, allowing requests made directly to those endpoints to skip the queue.", "default": "True"}, {"name": "max_size", "annotation": "int | None", "doc": "The maximum number of events the queue will store at any given moment. If the queue is full, new events will not be added and a user will receive a message saying that the queue is full. If None, the queue size will be unlimited.", "default": "None"}], "returns": {"annotation": null}, "example": "(\nI\nn\nt\ne\nr\nf\na\nc\ne\n)", "fns": [], "parent": "gradio"}, "blocks": {"class": null, "name": "Blocks", "description": "Blocks is Gradio's low-level API that allows you to create more custom web applications and demos than Interfaces (yet still entirely in Python).

Compared to the Interface class, Blocks offers more flexibility and control over: (1) the layout of components (2) the events that trigger the execution of functions (3) data flows (e.g. inputs can trigger outputs, which can trigger the next level of outputs). Blocks also offers ways to group together related demos such as with tabs.

The basic usage of Blocks is as follows: create a Blocks object, then use it as a context (with the \"with\" statement), and then define layouts, components, or events within the Blocks context. Finally, call the launch() method to launch the demo.
", "tags": {"demos": "blocks_hello, blocks_flipper, blocks_speech_text_sentiment, generate_english_german, sound_alert", "guides": "blocks-and-event-listeners, controlling-layout, state-in-blocks, custom-CSS-and-JS, custom-interpretations-with-blocks, using-blocks-like-functions"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "theme", "annotation": "Theme | str | None", "doc": "a Theme object or a string representing a theme. If a string, will look for a built-in theme with that name (e.g. \"soft\" or \"default\"), or will attempt to load a theme from the HF Hub (e.g. \"gradio/monochrome\"). If None, will use the Default theme.", "default": "None"}, {"name": "analytics_enabled", "annotation": "bool | None", "doc": "whether to allow basic telemetry. If None, will use GRADIO_ANALYTICS_ENABLED environment variable or default to True.", "default": "None"}, {"name": "mode", "annotation": "str", "doc": "a human-friendly name for the kind of Blocks or Interface being created.", "default": "\"blocks\""}, {"name": "title", "annotation": "str", "doc": "The tab title to display when this is opened in a browser window.", "default": "\"Gradio\""}, {"name": "css", "annotation": "str | None", "doc": "custom css or path to custom css file to apply to entire Blocks", "default": "None"}], "returns": {"annotation": null}, "example": "import gradio as gr\ndef update(name):\n return f\"Welcome to Gradio, {name}!\"\n\nwith gr.Blocks() as demo:\n gr.Markdown(\"Start typing below and then click **Run** to see the output.\")\n with gr.Row():\n inp = gr.Textbox(placeholder=\"What is your name?\")\n out = gr.Textbox()\n btn = gr.Button(\"Run\")\n btn.click(fn=update, inputs=inp, outputs=out)\n\ndemo.launch()", "fns": [{"fn": null, "name": "launch", "description": "Launches a simple web server that serves the demo. Can also be used to create a public link used by anyone to access the demo from their browser by setting share=True.
", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "inline", "annotation": "bool | None", "doc": "whether to display in the interface inline in an iframe. Defaults to True in python notebooks; False otherwise.", "default": "None"}, {"name": "inbrowser", "annotation": "bool", "doc": "whether to automatically launch the interface in a new tab on the default browser.", "default": "False"}, {"name": "share", "annotation": "bool | None", "doc": "whether to create a publicly shareable link for the interface. Creates an SSH tunnel to make your UI accessible from anywhere. If not provided, it is set to False by default every time, except when running in Google Colab. When localhost is not accessible (e.g. Google Colab), setting share=False is not supported.", "default": "None"}, {"name": "debug", "annotation": "bool", "doc": "if True, blocks the main thread from running. If running in Google Colab, this is needed to print the errors in the cell output.", "default": "False"}, {"name": "enable_queue", "annotation": "bool | None", "doc": "DEPRECATED (use .queue() method instead.) if True, inference requests will be served through a queue instead of with parallel threads. Required for longer inference times (> 1min) to prevent timeout. The default option in HuggingFace Spaces is True. The default option elsewhere is False.", "default": "None"}, {"name": "max_threads", "annotation": "int", "doc": "the maximum number of total threads that the Gradio app can generate in parallel. The default is inherited from the starlette library (currently 40). Applies whether the queue is enabled or not. But if queuing is enabled, this parameter is increaseed to be at least the concurrency_count of the queue.", "default": "40"}, {"name": "auth", "annotation": "Callable | tuple[str, str] | list[tuple[str, str]] | None", "doc": "If provided, username and password (or list of username-password tuples) required to access interface. Can also provide function that takes username and password and returns True if valid login.", "default": "None"}, {"name": "auth_message", "annotation": "str | None", "doc": "If provided, HTML message provided on login page.", "default": "None"}, {"name": "prevent_thread_lock", "annotation": "bool", "doc": "If True, the interface will block the main thread while the server is running.", "default": "False"}, {"name": "show_error", "annotation": "bool", "doc": "If True, any errors in the interface will be displayed in an alert modal and printed in the browser console log", "default": "False"}, {"name": "server_name", "annotation": "str | None", "doc": "to make app accessible on local network, set this to \"0.0.0.0\". Can be set by environment variable GRADIO_SERVER_NAME. If None, will use \"127.0.0.1\".", "default": "None"}, {"name": "server_port", "annotation": "int | None", "doc": "will start gradio app on this port (if available). Can be set by environment variable GRADIO_SERVER_PORT. 
If None, will search for an available port starting at 7860.", "default": "None"}, {"name": "show_tips", "annotation": "bool", "doc": "if True, will occasionally show tips about new Gradio features", "default": "False"}, {"name": "height", "annotation": "int", "doc": "The height in pixels of the iframe element containing the interface (used if inline=True)", "default": "500"}, {"name": "width", "annotation": "int | str", "doc": "The width in pixels of the iframe element containing the interface (used if inline=True)", "default": "\"100%\""}, {"name": "encrypt", "annotation": "bool | None", "doc": "DEPRECATED. Has no effect.", "default": "None"}, {"name": "favicon_path", "annotation": "str | None", "doc": "If a path to a file (.png, .gif, or .ico) is provided, it will be used as the favicon for the web page.", "default": "None"}, {"name": "ssl_keyfile", "annotation": "str | None", "doc": "If a path to a file is provided, will use this as the private key file to create a local server running on https.", "default": "None"}, {"name": "ssl_certfile", "annotation": "str | None", "doc": "If a path to a file is provided, will use this as the signed certificate for https. Needs to be provided if ssl_keyfile is provided.", "default": "None"}, {"name": "ssl_keyfile_password", "annotation": "str | None", "doc": "If a password is provided, will use this with the ssl certificate for https.", "default": "None"}, {"name": "ssl_verify", "annotation": "bool", "doc": "If False, skips certificate validation which allows self-signed certificates to be used.", "default": "True"}, {"name": "quiet", "annotation": "bool", "doc": "If True, suppresses most print statements.", "default": "False"}, {"name": "show_api", "annotation": "bool", "doc": "If True, shows the api docs in the footer of the app. Default True. If the queue is enabled, then api_open parameter of .queue() will determine if the api docs are shown, independent of the value of show_api.", "default": "True"}, {"name": "file_directories", "annotation": "list[str] | None", "doc": "This parameter has been renamed to `allowed_paths`. It will be removed in a future version.", "default": "None"}, {"name": "allowed_paths", "annotation": "list[str] | None", "doc": "List of complete filepaths or parent directories that gradio is allowed to serve (in addition to the directory containing the gradio python file). Must be absolute paths. Warning: if you provide directories, any files in these directories or their subdirectories are accessible to all users of your app.", "default": "None"}, {"name": "blocked_paths", "annotation": "list[str] | None", "doc": "List of complete filepaths or parent directories that gradio is not allowed to serve (i.e. users of your app are not allowed to access). Must be absolute paths. Warning: takes precedence over `allowed_paths` and all other directories exposed by Gradio by default.", "default": "None"}, {"name": "root_path", "annotation": "str | None", "doc": "The root path (or \"mount point\") of the application, if it's not served from the root (\"/\") of the domain. Often used when the application is behind a reverse proxy that forwards requests to the application. For example, if the application is served at \"https://example.com/myapp\", the `root_path` should be set to \"/myapp\". Can be set by environment variable GRADIO_ROOT_PATH. 
Defaults to \"\".", "default": "None"}, {"name": "app_kwargs", "annotation": "dict[str, Any] | None", "doc": "Additional keyword arguments to pass to the underlying FastAPI app as a dictionary of parameter keys and argument values. For example, `{\"docs_url\": \"/docs\"}`", "default": "None"}], "returns": {}, "example": "import gradio as gr\ndef reverse(text):\n return text[::-1]\nwith gr.Blocks() as demo:\n button = gr.Button(value=\"Reverse\")\n button.click(reverse, gr.Textbox(), gr.Textbox())\ndemo.launch(share=True, auth=(\"username\", \"password\"))", "override_signature": null, "parent": "gradio.Blocks"}, {"fn": null, "name": "queue", "description": "You can control the rate of processed requests by creating a queue. This will allow you to set the number of requests to be processed at one time, and will let users know their position in the queue.", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "concurrency_count", "annotation": "int", "doc": "Number of worker threads that will be processing requests from the queue concurrently. Increasing this number will increase the rate at which requests are processed, but will also increase the memory usage of the queue.", "default": "1"}, {"name": "status_update_rate", "annotation": "float | Literal['auto']", "doc": "If \"auto\", Queue will send status estimations to all clients whenever a job is finished. Otherwise Queue will send status at regular intervals set by this parameter as the number of seconds.", "default": "\"auto\""}, {"name": "client_position_to_load_data", "annotation": "int | None", "doc": "DEPRECATED. This parameter is deprecated and has no effect.", "default": "None"}, {"name": "default_enabled", "annotation": "bool | None", "doc": "Deprecated and has no effect.", "default": "None"}, {"name": "api_open", "annotation": "bool", "doc": "If True, the REST routes of the backend will be open, allowing requests made directly to those endpoints to skip the queue.", "default": "True"}, {"name": "max_size", "annotation": "int | None", "doc": "The maximum number of events the queue will store at any given moment. If the queue is full, new events will not be added and a user will receive a message saying that the queue is full. If None, the queue size will be unlimited.", "default": "None"}], "returns": {}, "example": "with gr.Blocks() as demo:\n button = gr.Button(label=\"Generate Image\")\n button.click(fn=image_generator, inputs=gr.Textbox(), outputs=gr.Image())\ndemo.queue(concurrency_count=3)\ndemo.launch()", "override_signature": null, "parent": "gradio.Blocks"}, {"fn": null, "name": "integrate", "description": "A catch-all method for integrating with other libraries. 
This method should be run after launch()", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "comet_ml", "annotation": "", "doc": "If a comet_ml Experiment object is provided, will integrate with the experiment and appear on Comet dashboard", "default": "None"}, {"name": "wandb", "annotation": "ModuleType | None", "doc": "If the wandb module is provided, will integrate with it and appear on WandB dashboard", "default": "None"}, {"name": "mlflow", "annotation": "ModuleType | None", "doc": "If the mlflow module is provided, will integrate with the experiment and appear on ML Flow dashboard", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Blocks"}, {"fn": null, "name": "load", "description": "For reverse compatibility reasons, this is both a class method and an instance method, the two of which, confusingly, do two completely different things.

Class method: loads a demo from a Hugging Face Spaces repo and creates it locally and returns a block instance. Warning: this method will be deprecated. Use the equivalent `gradio.load()` instead.

Instance method: adds event that runs as soon as the demo loads in the browser. Example usage below.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "Instance Method - the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.", "default": "None"}, {"name": "inputs", "annotation": "list[Component] | None", "doc": "Instance Method - List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "list[Component] | None", "doc": "Instance Method - List of gradio.components to use as inputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Instance Method - Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "Instance Method - If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "str", "doc": "Instance Method - If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "", "doc": "Instance Method - If True, will place the request on the queue, if the queue exists", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "Instance Method - If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Instance Method - Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "Instance Method - If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "Instance Method - If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "every", "annotation": "float | None", "doc": "Instance Method - Run this event 'every' number of seconds. Interpreted in seconds. Queue must be enabled.", "default": "None"}, {"name": "name", "annotation": "str | None", "doc": "Class Method - the name of the model (e.g. \"gpt2\" or \"facebook/bart-base\") or space (e.g. \"flax-community/spanish-gpt2\"), can include the `src` as prefix (e.g. 
\"models/facebook/bart-base\")", "default": "None"}, {"name": "src", "annotation": "str | None", "doc": "Class Method - the source of the model: `models` or `spaces` (or leave empty if source is provided as a prefix in `name`)", "default": "None"}, {"name": "api_key", "annotation": "str | None", "doc": "Class Method - optional access token for loading private Hugging Face Hub models or spaces. Find your token here: https://huggingface.co/settings/tokens. Warning: only provide this if you are loading a trusted private Space as it can be read by the Space you are loading.", "default": "None"}, {"name": "alias", "annotation": "str | None", "doc": "Class Method - optional string used as the name of the loaded model instead of the default name (only applies if loading a Space running Gradio 2.x)", "default": "None"}], "returns": {}, "example": "import gradio as gr\nimport datetime\nwith gr.Blocks() as demo:\n def get_time():\n return datetime.datetime.now().time()\n dt = gr.Textbox(label=\"Current time\")\n demo.load(get_time, inputs=None, outputs=dt)\ndemo.launch()", "override_signature": null, "parent": "gradio.Blocks"}], "demos": [["blocks_hello", "import gradio as gr\n\ndef welcome(name):\n return f\"Welcome to Gradio, {name}!\"\n\nwith gr.Blocks() as demo:\n gr.Markdown(\n \"\"\"\n # Hello World!\n Start typing below to see the output.\n \"\"\")\n inp = gr.Textbox(placeholder=\"What is your name?\")\n out = gr.Textbox()\n inp.change(welcome, inp, out)\n\nif __name__ == \"__main__\":\n demo.launch()"], ["blocks_flipper", "import numpy as np\nimport gradio as gr\n\n\ndef flip_text(x):\n return x[::-1]\n\n\ndef flip_image(x):\n return np.fliplr(x)\n\n\nwith gr.Blocks() as demo:\n gr.Markdown(\"Flip text or image files using this demo.\")\n with gr.Tab(\"Flip Text\"):\n text_input = gr.Textbox()\n text_output = gr.Textbox()\n text_button = gr.Button(\"Flip\")\n with gr.Tab(\"Flip Image\"):\n with gr.Row():\n image_input = gr.Image()\n image_output = gr.Image()\n image_button = gr.Button(\"Flip\")\n\n with gr.Accordion(\"Open for More!\"):\n gr.Markdown(\"Look at me...\")\n\n text_button.click(flip_text, inputs=text_input, outputs=text_output)\n image_button.click(flip_image, inputs=image_input, outputs=image_output)\n\nif __name__ == \"__main__\":\n demo.launch()\n"], ["blocks_speech_text_sentiment", "from transformers import pipeline\n\nimport gradio as gr\n\nasr = pipeline(\"automatic-speech-recognition\", \"facebook/wav2vec2-base-960h\")\nclassifier = pipeline(\"text-classification\")\n\n\ndef speech_to_text(speech):\n text = asr(speech)[\"text\"]\n return text\n\n\ndef text_to_sentiment(text):\n return classifier(text)[0][\"label\"]\n\n\ndemo = gr.Blocks()\n\nwith demo:\n audio_file = gr.Audio(type=\"filepath\")\n text = gr.Textbox()\n label = gr.Label()\n\n b1 = gr.Button(\"Recognize Speech\")\n b2 = gr.Button(\"Classify Sentiment\")\n\n b1.click(speech_to_text, inputs=audio_file, outputs=text)\n b2.click(text_to_sentiment, inputs=text, outputs=label)\n\nif __name__ == \"__main__\":\n demo.launch()\n"], ["generate_english_german", "import gradio as gr\n\nfrom transformers import pipeline\n\nenglish_translator = gr.Blocks.load(name=\"spaces/gradio/english_translator\")\nenglish_generator = pipeline(\"text-generation\", model=\"distilgpt2\")\n\n\ndef generate_text(text):\n english_text = english_generator(text)[0][\"generated_text\"]\n german_text = english_translator(english_text)\n return english_text, german_text\n\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n seed 
= gr.Text(label=\"Input Phrase\")\n with gr.Column():\n english = gr.Text(label=\"Generated English Text\")\n german = gr.Text(label=\"Generated German Text\")\n btn = gr.Button(\"Generate\")\n btn.click(generate_text, inputs=[seed], outputs=[english, german])\n gr.Examples([\"My name is Clara and I am\"], inputs=[seed])\n\nif __name__ == \"__main__\":\n demo.launch()"], ["sound_alert", "import time\nimport gradio as gr\n\n\njs_function = \"() => {new Audio('file=beep.mp3').play();}\"\n\ndef task(x):\n time.sleep(2)\n return \"Hello, \" + x \n\nwith gr.Blocks() as demo:\n name = gr.Textbox(label=\"name\")\n greeting = gr.Textbox(label=\"greeting\")\n name.blur(task, name, greeting)\n greeting.change(None, [], [], _js=js_function) # Note that _js is a special argument whose usage may change in the future\n \ndemo.launch()"]], "guides": [{"name": "blocks-and-event-listeners", "category": "building-with-blocks", "pretty_category": "Building With Blocks", "guide_index": 1, "absolute_index": 8, "pretty_name": "Blocks And Event Listeners", "content": "# Blocks and Event Listeners\n\nWe took a quick look at Blocks in the [Quickstart](https://gradio.app/guides/quickstart/#blocks-more-flexibility-and-control). Let's dive deeper. This guide will cover the how Blocks are structured, event listeners and their types, running events continuously, updating configurations, and using dictionaries vs lists. \n\n## Blocks Structure\n\nTake a look at the demo below.\n\n```python\nimport gradio as gr\n\ndef greet(name):\n return \"Hello \" + name + \"!\"\n\nwith gr.Blocks() as demo:\n name = gr.Textbox(label=\"Name\")\n output = gr.Textbox(label=\"Output Box\")\n greet_btn = gr.Button(\"Greet\")\n greet_btn.click(fn=greet, inputs=name, outputs=output, api_name=\"greet\")\n \n\ndemo.launch()\n```\n\n\n- First, note the `with gr.Blocks() as demo:` clause. The Blocks app code will be contained within this clause.\n- Next come the Components. These are the same Components used in `Interface`. However, instead of being passed to some constructor, Components are automatically added to the Blocks as they are created within the `with` clause.\n- Finally, the `click()` event listener. Event listeners define the data flow within the app. In the example above, the listener ties the two Textboxes together. The Textbox `name` acts as the input and Textbox `output` acts as the output to the `greet` method. This dataflow is triggered when the Button `greet_btn` is clicked. Like an Interface, an event listener can take multiple inputs or outputs.\n\n## Event Listeners and Interactivity\n\nIn the example above, you'll notice that you are able to edit Textbox `name`, but not Textbox `output`. This is because any Component that acts as an input to an event listener is made interactive. However, since Textbox `output` acts only as an output, Gradio determines that it should not be made interactive. You can override the default behavior and directly configure the interactivity of a Component with the boolean `interactive` keyword argument. \n\n```python\noutput = gr.Textbox(label=\"Output\", interactive=True)\n```\n\n_Note_: What happens if a Gradio component is neither an input nor an output? If a component is constructed with a default value, then it is presumed to be displaying content and is rendered non-interactive. Otherwise, it is rendered interactive. 
Again, this behavior can be overridden by specifying a value for the `interactive` argument.\n\n## Types of Event Listeners\n\nTake a look at the demo below:\n\n```python\nimport gradio as gr\n\ndef welcome(name):\n return f\"Welcome to Gradio, {name}!\"\n\nwith gr.Blocks() as demo:\n gr.Markdown(\n \"\"\"\n # Hello World!\n Start typing below to see the output.\n \"\"\")\n inp = gr.Textbox(placeholder=\"What is your name?\")\n out = gr.Textbox()\n inp.change(welcome, inp, out)\n\ndemo.launch()\n```\n\n\nInstead of being triggered by a click, the `welcome` function is triggered by typing in the Textbox `inp`. This is due to the `change()` event listener. Different Components support different event listeners. For example, the `Video` Component supports a `play()` event listener, triggered when a user presses play. See the [Docs](http://gradio.app/docs#components) for the event listeners for each Component.\n\n## Multiple Data Flows\n\nA Blocks app is not limited to a single data flow the way Interfaces are. Take a look at the demo below:\n\n```python\nimport gradio as gr\n\ndef increase(num):\n return num + 1\n\nwith gr.Blocks() as demo:\n a = gr.Number(label=\"a\")\n b = gr.Number(label=\"b\")\n btoa = gr.Button(\"a > b\")\n atob = gr.Button(\"b > a\")\n atob.click(increase, a, b)\n btoa.click(increase, b, a)\n\ndemo.launch()\n```\n\n\nNote that `num1` can act as input to `num2`, and also vice-versa! As your apps get more complex, you will have many data flows connecting various Components. \n\nHere's an example of a \"multi-step\" demo, where the output of one model (a speech-to-text model) gets fed into the next model (a sentiment classifier).\n\n```python\nfrom transformers import pipeline\n\nimport gradio as gr\n\nasr = pipeline(\"automatic-speech-recognition\", \"facebook/wav2vec2-base-960h\")\nclassifier = pipeline(\"text-classification\")\n\n\ndef speech_to_text(speech):\n text = asr(speech)[\"text\"]\n return text\n\n\ndef text_to_sentiment(text):\n return classifier(text)[0][\"label\"]\n\n\ndemo = gr.Blocks()\n\nwith demo:\n audio_file = gr.Audio(type=\"filepath\")\n text = gr.Textbox()\n label = gr.Label()\n\n b1 = gr.Button(\"Recognize Speech\")\n b2 = gr.Button(\"Classify Sentiment\")\n\n b1.click(speech_to_text, inputs=audio_file, outputs=text)\n b2.click(text_to_sentiment, inputs=text, outputs=label)\n\ndemo.launch()\n\n```\n\n\n## Function Input List vs Dict\n\nThe event listeners you've seen so far have a single input component. If you'd like to have multiple input components pass data to the function, you have two options on how the function can accept input component values:\n\n1. as a list of arguments, or\n2. as a single dictionary of values, keyed by the component\n\nLet's see an example of each:\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n a = gr.Number(label=\"a\")\n b = gr.Number(label=\"b\")\n with gr.Row():\n add_btn = gr.Button(\"Add\")\n sub_btn = gr.Button(\"Subtract\")\n c = gr.Number(label=\"sum\")\n\n def add(num1, num2):\n return num1 + num2\n add_btn.click(add, inputs=[a, b], outputs=c)\n\n def sub(data):\n return data[a] - data[b]\n sub_btn.click(sub, inputs={a, b}, outputs=c)\n\n\ndemo.launch()\n```\n\nBoth `add()` and `sub()` take `a` and `b` as inputs. However, the syntax is different between these listeners. \n\n1. To the `add_btn` listener, we pass the inputs as a list. The function `add()` takes each of these inputs as arguments. The value of `a` maps to the argument `num1`, and the value of `b` maps to the argument `num2`.\n2. 
To the `sub_btn` listener, we pass the inputs as a set (note the curly brackets!). The function `sub()` takes a single dictionary argument `data`, where the keys are the input components, and the values are the values of those components.\n\nIt is a matter of preference which syntax you prefer! For functions with many input components, option 2 may be easier to manage.\n\n\n\n## Function Return List vs Dict\n\nSimilarly, you may return values for multiple output components either as:\n\n1. a list of values, or\n2. a dictionary keyed by the component\n\nLet's first see an example of (1), where we set the values of two output components by returning two values:\n\n```python\nwith gr.Blocks() as demo:\n food_box = gr.Number(value=10, label=\"Food Count\")\n status_box = gr.Textbox()\n def eat(food):\n if food > 0:\n return food - 1, \"full\"\n else:\n return 0, \"hungry\"\n gr.Button(\"EAT\").click(\n fn=eat, \n inputs=food_box,\n outputs=[food_box, status_box]\n )\n```\n\nAbove, each return statement returns two values corresponding to `food_box` and `status_box`, respectively.\n\nInstead of returning a list of values corresponding to each output component in order, you can also return a dictionary, with the key corresponding to the output component and the value as the new value. This also allows you to skip updating some output components. \n\n```python\nwith gr.Blocks() as demo:\n food_box = gr.Number(value=10, label=\"Food Count\")\n status_box = gr.Textbox()\n def eat(food):\n if food > 0:\n return {food_box: food - 1, status_box: \"full\"}\n else:\n return {status_box: \"hungry\"}\n gr.Button(\"EAT\").click(\n fn=eat, \n inputs=food_box,\n outputs=[food_box, status_box]\n )\n```\n\nNotice how when there is no food, we only update the `status_box` element. We skipped updating the `food_box` component.\n\nDictionary returns are helpful when an event listener affects many components on return, or conditionally affects outputs and not others.\n\nKeep in mind that with dictionary returns, we still need to specify the possible outputs in the event listener.\n\n## Updating Component Configurations\n\nThe return value of an event listener function is usually the updated value of the corresponding output Component. Sometimes we want to update the configuration of the Component as well, such as the visibility. In this case, we return a `gr.update()` object instead of just the update Component value.\n\n```python\nimport gradio as gr\n\ndef change_textbox(choice):\n if choice == \"short\":\n return gr.update(lines=2, visible=True, value=\"Short story: \")\n elif choice == \"long\":\n return gr.update(lines=8, visible=True, value=\"Long story...\")\n else:\n return gr.update(visible=False)\n\nwith gr.Blocks() as demo:\n radio = gr.Radio(\n [\"short\", \"long\", \"none\"], label=\"Essay Length to Write?\"\n )\n text = gr.Textbox(lines=2, interactive=True)\n radio.change(fn=change_textbox, inputs=radio, outputs=text)\n\ndemo.launch()\n```\n\n\nSee how we can configure the Textbox itself through the `gr.update()` method. The `value=` argument can still be used to update the value along with Component configuration.\n\n## Running Events Consecutively\n\nYou can also run events consecutively by using the `then` method of an event listener. This will run an event after the previous event has finished running. This is useful for running events that update components in multiple steps. 
\n\nFor example, in the chatbot example below, we first update the chatbot with the user message immediately, and then update the chatbot with the computer response after a simulated delay.\n\n```python\nimport gradio as gr\nimport random\nimport time\n\nwith gr.Blocks() as demo:\n chatbot = gr.Chatbot()\n msg = gr.Textbox()\n clear = gr.Button(\"Clear\")\n\n def user(user_message, history):\n return \"\", history + [[user_message, None]]\n\n def bot(history):\n bot_message = random.choice([\"How are you?\", \"I love you\", \"I'm very hungry\"])\n time.sleep(2)\n history[-1][1] = bot_message\n return history\n\n msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(\n bot, chatbot, chatbot\n )\n clear.click(lambda: None, None, chatbot, queue=False)\n \ndemo.queue()\ndemo.launch()\n\n```\n\n\nThe `.then()` method of an event listener executes the subsequent event regardless of whether the previous event raised any errors. If you'd like to only run subsequent events if the previous event executed successfully, use the `.success()` method, which takes the same arguments as `.then()`.\n\n## Running Events Continuously\n\nYou can run events on a fixed schedule using the `every` parameter of the event listener. This will run the event\n`every` number of seconds while the client connection is open. If the connection is closed, the event will stop running after the following iteration.\nNote that this does not take into account the runtime of the event itself. So a function\nwith a 1 second runtime running with `every=5`, would actually run every 6 seconds.\n\nHere is an example of a sine curve that updates every second!\n\n```python\nimport math\nimport gradio as gr\nimport plotly.express as px\nimport numpy as np\n\n\nplot_end = 2 * math.pi\n\n\ndef get_plot(period=1):\n global plot_end\n x = np.arange(plot_end - 2 * math.pi, plot_end, 0.02)\n y = np.sin(2*math.pi*period * x)\n fig = px.line(x=x, y=y)\n plot_end += 2 * math.pi\n if plot_end > 1000:\n plot_end = 2 * math.pi\n return fig\n\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n gr.Markdown(\"Change the value of the slider to automatically update the plot\")\n period = gr.Slider(label=\"Period of plot\", value=1, minimum=0, maximum=10, step=1)\n plot = gr.Plot(label=\"Plot (updates every half second)\")\n\n dep = demo.load(get_plot, None, plot, every=1)\n period.change(get_plot, period, plot, every=1, cancels=[dep])\n\n\nif __name__ == \"__main__\":\n demo.queue().launch()\n\n```\n\n\n## Gathering Event Data\n\nYou can gather specific data about an event by adding the associated event data class as a type hint to an argument in the event listener function. \n\nFor example, event data for `.select()` can be type hinted by a `gradio.SelectData` argument. This event is triggered when a user selects some part of the triggering component, and the event data includes information about what the user specifically selected. If a user selected a specific word in a `Textbox`, a specific image in a `Gallery`, or a specific cell in a `DataFrame`, the event data argument would contain information about the specific selection.\n\nIn the 2 player tic-tac-toe demo below, a user can select a cell in the `DataFrame` to make a move. The event data argument contains information about the specific cell that was selected. We can first check to see if the cell is empty, and then update the cell with the user's move. 
\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n turn = gr.Textbox(\"X\", interactive=False, label=\"Turn\")\n board = gr.Dataframe(value=[[\"\", \"\", \"\"]] * 3, interactive=False, type=\"array\")\n\n def place(board, turn, evt: gr.SelectData):\n if evt.value:\n return board, turn\n board[evt.index[0]][evt.index[1]] = turn\n turn = \"O\" if turn == \"X\" else \"X\"\n return board, turn\n\n board.select(place, [board, turn], [board, turn])\n\ndemo.launch()\n```\n", "html": "

Blocks and Event Listeners

\n\n

We took a quick look at Blocks in the Quickstart. Let's dive deeper. This guide will cover how Blocks are structured, event listeners and their types, running events continuously, updating configurations, and using dictionaries vs lists.

\n\n

Blocks Structure

\n\n

Take a look at the demo below.

\n\n
import gradio as gr\n\ndef greet(name):\n    return \"Hello \" + name + \"!\"\n\nwith gr.Blocks() as demo:\n    name = gr.Textbox(label=\"Name\")\n    output = gr.Textbox(label=\"Output Box\")\n    greet_btn = gr.Button(\"Greet\")\n    greet_btn.click(fn=greet, inputs=name, outputs=output, api_name=\"greet\")\n\n\ndemo.launch()\n
\n\n

\n\n
  • First, note the with gr.Blocks() as demo: clause. The Blocks app code will be contained within this clause.
  • Next come the Components. These are the same Components used in Interface. However, instead of being passed to some constructor, Components are automatically added to the Blocks as they are created within the with clause.
  • Finally, the click() event listener. Event listeners define the data flow within the app. In the example above, the listener ties the two Textboxes together. The Textbox name acts as the input and Textbox output acts as the output to the greet method. This dataflow is triggered when the Button greet_btn is clicked. Like an Interface, an event listener can take multiple inputs or outputs.
\n\n

Event Listeners and Interactivity

\n\n

In the example above, you'll notice that you are able to edit Textbox name, but not Textbox output. This is because any Component that acts as an input to an event listener is made interactive. However, since Textbox output acts only as an output, Gradio determines that it should not be made interactive. You can override the default behavior and directly configure the interactivity of a Component with the boolean interactive keyword argument.

\n\n
output = gr.Textbox(label=\"Output\", interactive=True)\n
\n\n

Note: What happens if a Gradio component is neither an input nor an output? If a component is constructed with a default value, then it is presumed to be displaying content and is rendered non-interactive. Otherwise, it is rendered interactive. Again, this behavior can be overridden by specifying a value for the interactive argument.

\n\n

Types of Event Listeners

\n\n

Take a look at the demo below:

\n\n
import gradio as gr\n\ndef welcome(name):\n    return f\"Welcome to Gradio, {name}!\"\n\nwith gr.Blocks() as demo:\n    gr.Markdown(\n    \"\"\"\n    # Hello World!\n    Start typing below to see the output.\n    \"\"\")\n    inp = gr.Textbox(placeholder=\"What is your name?\")\n    out = gr.Textbox()\n    inp.change(welcome, inp, out)\n\ndemo.launch()\n
\n\n

\n\n

Instead of being triggered by a click, the welcome function is triggered by typing in the Textbox inp. This is due to the change() event listener. Different Components support different event listeners. For example, the Video Component supports a play() event listener, triggered when a user presses play. See the Docs for the event listeners for each Component.

\n\n
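
For example, a play() listener on a Video component might be wired up as in the sketch below. The handler, the file name, and the Status Textbox are illustrative assumptions; only the play() event itself comes from the paragraph above.

\n\n
import gradio as gr\n\ndef on_play():\n    return \"Playing...\"  # hypothetical handler: updates a status box when playback starts\n\nwith gr.Blocks() as demo:\n    video = gr.Video(\"example.mp4\")  # hypothetical local video file\n    status = gr.Textbox(label=\"Status\")\n    video.play(on_play, None, status)  # triggered when the user presses play\n\ndemo.launch()\n
\n\n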

Multiple Data Flows

\n\n

A Blocks app is not limited to a single data flow the way Interfaces are. Take a look at the demo below:

\n\n
import gradio as gr\n\ndef increase(num):\n    return num + 1\n\nwith gr.Blocks() as demo:\n    a = gr.Number(label=\"a\")\n    b = gr.Number(label=\"b\")\n    btoa = gr.Button(\"a > b\")\n    atob = gr.Button(\"b > a\")\n    atob.click(increase, a, b)\n    btoa.click(increase, b, a)\n\ndemo.launch()\n
\n\n

\n\n

Note that a can act as input to b, and also vice-versa! As your apps get more complex, you will have many data flows connecting various Components.

\n\n

Here's an example of a \"multi-step\" demo, where the output of one model (a speech-to-text model) gets fed into the next model (a sentiment classifier).

\n\n
from transformers import pipeline\n\nimport gradio as gr\n\nasr = pipeline(\"automatic-speech-recognition\", \"facebook/wav2vec2-base-960h\")\nclassifier = pipeline(\"text-classification\")\n\n\ndef speech_to_text(speech):\n    text = asr(speech)[\"text\"]\n    return text\n\n\ndef text_to_sentiment(text):\n    return classifier(text)[0][\"label\"]\n\n\ndemo = gr.Blocks()\n\nwith demo:\n    audio_file = gr.Audio(type=\"filepath\")\n    text = gr.Textbox()\n    label = gr.Label()\n\n    b1 = gr.Button(\"Recognize Speech\")\n    b2 = gr.Button(\"Classify Sentiment\")\n\n    b1.click(speech_to_text, inputs=audio_file, outputs=text)\n    b2.click(text_to_sentiment, inputs=text, outputs=label)\n\ndemo.launch()\n\n
\n\n

\n\n

Function Input List vs Dict

\n\n

The event listeners you've seen so far have a single input component. If you'd like to have multiple input components pass data to the function, you have two options on how the function can accept input component values:

\n\n
  1. as a list of arguments, or
  2. as a single dictionary of values, keyed by the component
\n\n

Let's see an example of each:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    a = gr.Number(label=\"a\")\n    b = gr.Number(label=\"b\")\n    with gr.Row():\n        add_btn = gr.Button(\"Add\")\n        sub_btn = gr.Button(\"Subtract\")\n    c = gr.Number(label=\"sum\")\n\n    def add(num1, num2):\n        return num1 + num2\n    add_btn.click(add, inputs=[a, b], outputs=c)\n\n    def sub(data):\n        return data[a] - data[b]\n    sub_btn.click(sub, inputs={a, b}, outputs=c)\n\n\ndemo.launch()\n
\n\n

Both add() and sub() take a and b as inputs. However, the syntax is different between these listeners.

\n\n
  1. To the add_btn listener, we pass the inputs as a list. The function add() takes each of these inputs as arguments. The value of a maps to the argument num1, and the value of b maps to the argument num2.
  2. To the sub_btn listener, we pass the inputs as a set (note the curly brackets!). The function sub() takes a single dictionary argument data, where the keys are the input components, and the values are the values of those components.
\n\n

Which syntax you use is a matter of preference! For functions with many input components, option 2 may be easier to manage.

\n\n

\n\n

Function Return List vs Dict

\n\n

Similarly, you may return values for multiple output components either as:

\n\n
  1. a list of values, or
  2. a dictionary keyed by the component
\n\n

Let's first see an example of (1), where we set the values of two output components by returning two values:

\n\n
with gr.Blocks() as demo:\n    food_box = gr.Number(value=10, label=\"Food Count\")\n    status_box = gr.Textbox()\n    def eat(food):\n        if food > 0:\n            return food - 1, \"full\"\n        else:\n            return 0, \"hungry\"\n    gr.Button(\"EAT\").click(\n        fn=eat, \n        inputs=food_box,\n        outputs=[food_box, status_box]\n    )\n
\n\n

Above, each return statement returns two values corresponding to food_box and status_box, respectively.

\n\n

Instead of returning a list of values corresponding to each output component in order, you can also return a dictionary, with the key corresponding to the output component and the value as the new value. This also allows you to skip updating some output components.

\n\n
with gr.Blocks() as demo:\n    food_box = gr.Number(value=10, label=\"Food Count\")\n    status_box = gr.Textbox()\n    def eat(food):\n        if food > 0:\n            return {food_box: food - 1, status_box: \"full\"}\n        else:\n            return {status_box: \"hungry\"}\n    gr.Button(\"EAT\").click(\n        fn=eat, \n        inputs=food_box,\n        outputs=[food_box, status_box]\n    )\n
\n\n

Notice how when there is no food, we only update the status_box element. We skipped updating the food_box component.

\n\n

Dictionary returns are helpful when an event listener affects many components on return, or conditionally affects some outputs and not others.

\n\n

Keep in mind that with dictionary returns, we still need to specify the possible outputs in the event listener.

\n\n

Updating Component Configurations

\n\n

The return value of an event listener function is usually the updated value of the corresponding output Component. Sometimes we want to update the configuration of the Component as well, such as the visibility. In this case, we return a gr.update() object instead of just the updated Component value.

\n\n
import gradio as gr\n\ndef change_textbox(choice):\n    if choice == \"short\":\n        return gr.update(lines=2, visible=True, value=\"Short story: \")\n    elif choice == \"long\":\n        return gr.update(lines=8, visible=True, value=\"Long story...\")\n    else:\n        return gr.update(visible=False)\n\nwith gr.Blocks() as demo:\n    radio = gr.Radio(\n        [\"short\", \"long\", \"none\"], label=\"Essay Length to Write?\"\n    )\n    text = gr.Textbox(lines=2, interactive=True)\n    radio.change(fn=change_textbox, inputs=radio, outputs=text)\n\ndemo.launch()\n
\n\n

\n\n

See how we can configure the Textbox itself through the gr.update() method. The value= argument can still be used to update the value along with Component configuration.

\n\n

Running Events Consecutively

\n\n

You can also run events consecutively by using the then method of an event listener. This will run an event after the previous event has finished running. This is useful for running events that update components in multiple steps.

\n\n

For example, in the chatbot example below, we first update the chatbot with the user message immediately, and then update the chatbot with the computer response after a simulated delay.

\n\n
import gradio as gr\nimport random\nimport time\n\nwith gr.Blocks() as demo:\n    chatbot = gr.Chatbot()\n    msg = gr.Textbox()\n    clear = gr.Button(\"Clear\")\n\n    def user(user_message, history):\n        return \"\", history + [[user_message, None]]\n\n    def bot(history):\n        bot_message = random.choice([\"How are you?\", \"I love you\", \"I'm very hungry\"])\n        time.sleep(2)\n        history[-1][1] = bot_message\n        return history\n\n    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(\n        bot, chatbot, chatbot\n    )\n    clear.click(lambda: None, None, chatbot, queue=False)\n\ndemo.queue()\ndemo.launch()\n\n
\n\n

\n\n

The .then() method of an event listener executes the subsequent event regardless of whether the previous event raised any errors. If you'd like to only run subsequent events if the previous event executed successfully, use the .success() method, which takes the same arguments as .then().

\n\n
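
For example, a minimal sketch of the chatbot submit chain from the demo above, with .success() in place of .then() so that bot only runs if user finished without raising an error (msg, user, bot, and chatbot are the names defined in that demo):

\n\n
msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).success(\n    bot, chatbot, chatbot\n)\n
\n\n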

Running Events Continuously

\n\n

You can run events on a fixed schedule using the every parameter of the event listener. This will run the event once every every seconds while the client connection is open. If the connection is closed, the event will stop running after the following iteration. Note that this does not take into account the runtime of the event itself, so a function with a 1 second runtime running with every=5 would actually run every 6 seconds.

\n\n

Here is an example of a sine curve that updates every second!

\n\n
import math\nimport gradio as gr\nimport plotly.express as px\nimport numpy as np\n\n\nplot_end = 2 * math.pi\n\n\ndef get_plot(period=1):\n    global plot_end\n    x = np.arange(plot_end - 2 * math.pi, plot_end, 0.02)\n    y = np.sin(2*math.pi*period * x)\n    fig = px.line(x=x, y=y)\n    plot_end += 2 * math.pi\n    if plot_end > 1000:\n        plot_end = 2 * math.pi\n    return fig\n\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        with gr.Column():\n            gr.Markdown(\"Change the value of the slider to automatically update the plot\")\n            period = gr.Slider(label=\"Period of plot\", value=1, minimum=0, maximum=10, step=1)\n            plot = gr.Plot(label=\"Plot (updates every half second)\")\n\n    dep = demo.load(get_plot, None, plot, every=1)\n    period.change(get_plot, period, plot, every=1, cancels=[dep])\n\n\nif __name__ == \"__main__\":\n    demo.queue().launch()\n\n
\n\n

\n\n

Gathering Event Data

\n\n

You can gather specific data about an event by adding the associated event data class as a type hint to an argument in the event listener function.

\n\n

For example, event data for .select() can be type hinted by a gradio.SelectData argument. This event is triggered when a user selects some part of the triggering component, and the event data includes information about what the user specifically selected. If a user selected a specific word in a Textbox, a specific image in a Gallery, or a specific cell in a DataFrame, the event data argument would contain information about the specific selection.

\n\n

In the 2 player tic-tac-toe demo below, a user can select a cell in the DataFrame to make a move. The event data argument contains information about the specific cell that was selected. We can first check to see if the cell is empty, and then update the cell with the user's move.

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    turn = gr.Textbox(\"X\", interactive=False, label=\"Turn\")\n    board = gr.Dataframe(value=[[\"\", \"\", \"\"]] * 3, interactive=False, type=\"array\")\n\n    def place(board, turn, evt: gr.SelectData):\n        if evt.value:\n            return board, turn\n        board[evt.index[0]][evt.index[1]] = turn\n        turn = \"O\" if turn == \"X\" else \"X\"\n        return board, turn\n\n    board.select(place, [board, turn], [board, turn])\n\ndemo.launch()\n
\n\n

\n", "tags": [], "spaces": [], "url": "/guides/blocks-and-event-listeners/", "contributor": null}, {"name": "controlling-layout", "category": "building-with-blocks", "pretty_category": "Building With Blocks", "guide_index": 2, "absolute_index": 9, "pretty_name": "Controlling Layout", "content": "# Controlling Layout\n\nBy default, Components in Blocks are arranged vertically. Let's take a look at how we can rearrange Components. Under the hood, this layout structure uses the [flexbox model of web development](https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Flexible_Box_Layout/Basic_Concepts_of_Flexbox).\n\n## Rows\n\nElements within a `with gr.Row` clause will all be displayed horizontally. For example, to display two Buttons side by side:\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row():\n btn1 = gr.Button(\"Button 1\")\n btn2 = gr.Button(\"Button 2\")\n```\n\nTo make every element in a Row have the same height, use the `equal_height` argument of the `style` method.\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row().style(equal_height=True):\n textbox = gr.Textbox()\n btn2 = gr.Button(\"Button 2\")\n```\n\nThe widths of elements in a Row can be controlled via a combination of `scale` and `min_width` arguments that are present in every Component.\n\n- `scale` is an integer that defines how an element will take up space in a Row. If scale is set to `0`, and element will not expand to take up space. If scale is set to `1` or greater, the element well expand. Multiple elements in a row will expand proportional to their scale. Below, `btn1` will expand twice as much as `btn2`, while `btn0` will not expand at all:\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row():\n btn0 = gr.Button(\"Button 0\", scale=0)\n btn1 = gr.Button(\"Button 1\", scale=1)\n btn2 = gr.Button(\"Button 2\", scale=2)\n```\n\n- `min_width` will set the minimum width the element will take. The Row will wrap if there isn't sufficient space to satisfy all `min_width` values.\n\nLearn more about Rows in the [docs](https://gradio.app/docs/#row).\n\n## Columns and Nesting\n\nComponents within a Column will be placed vertically atop each other. Since the vertical layout is the default layout for Blocks apps anyway, to be useful, Columns are usually nested within Rows. For example:\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n with gr.Row():\n text1 = gr.Textbox(label=\"t1\")\n slider2 = gr.Textbox(label=\"s2\")\n drop3 = gr.Dropdown([\"a\", \"b\", \"c\"], label=\"d3\")\n with gr.Row():\n with gr.Column(scale=1, min_width=600):\n text1 = gr.Textbox(label=\"prompt 1\")\n text2 = gr.Textbox(label=\"prompt 2\")\n inbtw = gr.Button(\"Between\")\n text4 = gr.Textbox(label=\"prompt 1\")\n text5 = gr.Textbox(label=\"prompt 2\")\n with gr.Column(scale=2, min_width=600):\n img1 = gr.Image(\"images/cheetah.jpg\")\n btn = gr.Button(\"Go\").style(full_width=True)\n\ndemo.launch()\n```\n\n\nSee how the first column has two Textboxes arranged vertically. The second column has an Image and Button arranged vertically. Notice how the relative widths of the two columns is set by the `scale` parameter. The column with twice the `scale` value takes up twice the width.\n\nLearn more about Columns in the [docs](https://gradio.app/docs/#column).\n\n## Tabs and Accordions\n\nYou can also create Tabs using the `with gr.Tab('tab_name'):` clause. Any component created inside of a `with gr.Tab('tab_name'):` context appears in that tab. 
Consecutive Tab clauses are grouped together so that a single tab can be selected at one time, and only the components within that Tab's context are shown.\n\nFor example:\n\n```python\nimport numpy as np\nimport gradio as gr\n\n\ndef flip_text(x):\n return x[::-1]\n\n\ndef flip_image(x):\n return np.fliplr(x)\n\n\nwith gr.Blocks() as demo:\n gr.Markdown(\"Flip text or image files using this demo.\")\n with gr.Tab(\"Flip Text\"):\n text_input = gr.Textbox()\n text_output = gr.Textbox()\n text_button = gr.Button(\"Flip\")\n with gr.Tab(\"Flip Image\"):\n with gr.Row():\n image_input = gr.Image()\n image_output = gr.Image()\n image_button = gr.Button(\"Flip\")\n\n with gr.Accordion(\"Open for More!\"):\n gr.Markdown(\"Look at me...\")\n\n text_button.click(flip_text, inputs=text_input, outputs=text_output)\n image_button.click(flip_image, inputs=image_input, outputs=image_output)\n\ndemo.launch()\n\n```\n\n\nAlso note the `gr.Accordion('label')` in this example. The Accordion is a layout that can be toggled open or closed. Like `Tabs`, it is a layout element that can selectively hide or show content. Any components that are defined inside of a `with gr.Accordion('label'):` will be hidden or shown when the accordion's toggle icon is clicked.\n\nLearn more about [Tabs](https://gradio.app/docs/#tab) and [Accordions](https://gradio.app/docs/#accordion) in the docs.\n\n## Visibility\n\nBoth Components and Layout elements have a `visible` argument that can set initially and also updated using `gr.update()`. Setting `gr.update(visible=...)` on a Column can be used to show or hide a set of Components.\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n error_box = gr.Textbox(label=\"Error\", visible=False)\n\n name_box = gr.Textbox(label=\"Name\")\n age_box = gr.Number(label=\"Age\", minimum=0, maximum=100)\n symptoms_box = gr.CheckboxGroup([\"Cough\", \"Fever\", \"Runny Nose\"])\n submit_btn = gr.Button(\"Submit\")\n\n with gr.Column(visible=False) as output_col:\n diagnosis_box = gr.Textbox(label=\"Diagnosis\")\n patient_summary_box = gr.Textbox(label=\"Patient Summary\")\n\n def submit(name, age, symptoms):\n if len(name) == 0:\n return {error_box: gr.update(value=\"Enter name\", visible=True)}\n return {\n output_col: gr.update(visible=True),\n diagnosis_box: \"covid\" if \"Cough\" in symptoms else \"flu\",\n patient_summary_box: f\"{name}, {age} y/o\",\n }\n\n submit_btn.click(\n submit,\n [name_box, age_box, symptoms_box],\n [error_box, diagnosis_box, patient_summary_box, output_col],\n )\n\ndemo.launch()\n\n```\n\n\n## Variable Number of Outputs\n\nBy adjusting the visibility of components in a dynamic way, it is possible to create\ndemos with Gradio that support a *variable numbers of outputs*. Here's a very simple example\nwhere the number of output textboxes is controlled by an input slider:\n\n```python\nimport gradio as gr\n\nmax_textboxes = 10\n\ndef variable_outputs(k):\n k = int(k)\n return [gr.Textbox.update(visible=True)]*k + [gr.Textbox.update(visible=False)]*(max_textboxes-k)\n\nwith gr.Blocks() as demo:\n s = gr.Slider(1, max_textboxes, value=max_textboxes, step=1, label=\"How many textboxes to show:\")\n textboxes = []\n for i in range(max_textboxes):\n t = gr.Textbox(f\"Textbox {i}\")\n textboxes.append(t)\n\n s.change(variable_outputs, s, textboxes)\n\nif __name__ == \"__main__\":\n demo.launch()\n\n```\n\n\n## Defining and Rendering Components Separately\n\nIn some cases, you might want to define components before you actually render them in your UI. 
For instance, you might want to show an examples section using `gr.Examples` above the corresponding `gr.Textbox` input. Since `gr.Examples` requires as a parameter the input component object, you will need to first define the input component, but then render it later, after you have defined the `gr.Examples` object.\n\nThe solution to this is to define the `gr.Textbox` outside of the `gr.Blocks()` scope and use the component's `.render()` method wherever you'd like it placed in the UI.\n\nHere's a full code example:\n\n```python\ninput_textbox = gr.Textbox()\n\nwith gr.Blocks() as demo:\n gr.Examples([\"hello\", \"bonjour\", \"merhaba\"], input_textbox)\n input_textbox.render()\n```\n", "html": "

Controlling Layout

\n\n

By default, Components in Blocks are arranged vertically. Let's take a look at how we can rearrange Components. Under the hood, this layout structure uses the flexbox model of web development.

\n\n

Rows

\n\n

Elements within a with gr.Row clause will all be displayed horizontally. For example, to display two Buttons side by side:

\n\n
with gr.Blocks() as demo:\n    with gr.Row():\n        btn1 = gr.Button(\"Button 1\")\n        btn2 = gr.Button(\"Button 2\")\n
\n\n

To make every element in a Row have the same height, use the equal_height argument of the style method.

\n\n
with gr.Blocks() as demo:\n    with gr.Row().style(equal_height=True):\n        textbox = gr.Textbox()\n        btn2 = gr.Button(\"Button 2\")\n
\n\n

The widths of elements in a Row can be controlled via a combination of scale and min_width arguments that are present in every Component.

\n\n
  • scale is an integer that defines how an element will take up space in a Row. If scale is set to 0, the element will not expand to take up space. If scale is set to 1 or greater, the element will expand. Multiple elements in a row will expand proportionally to their scale. Below, btn1 will expand twice as much as btn2, while btn0 will not expand at all:
\n\n
with gr.Blocks() as demo:\n    with gr.Row():\n        btn0 = gr.Button(\"Button 0\", scale=0)\n        btn1 = gr.Button(\"Button 1\", scale=1)\n        btn2 = gr.Button(\"Button 2\", scale=2)\n
\n\n
  • min_width will set the minimum width the element will take. The Row will wrap if there isn't sufficient space to satisfy all min_width values. For example, see the sketch below:
\n\n
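
As a minimal sketch (the 300 pixel value and Button labels are arbitrary assumptions), both buttons below claim at least 300 pixels of width, so the Row wraps them onto separate lines on narrow screens:

\n\n
with gr.Blocks() as demo:\n    with gr.Row():\n        btn1 = gr.Button(\"Button 1\", min_width=300)  # at least 300px wide\n        btn2 = gr.Button(\"Button 2\", min_width=300)  # wraps below btn1 if the Row is too narrow\n
\n\n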

Learn more about Rows in the docs.

\n\n

Columns and Nesting

\n\n

Components within a Column will be placed vertically atop each other. Since the vertical layout is the default layout for Blocks apps anyway, to be useful, Columns are usually nested within Rows. For example:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        text1 = gr.Textbox(label=\"t1\")\n        slider2 = gr.Textbox(label=\"s2\")\n        drop3 = gr.Dropdown([\"a\", \"b\", \"c\"], label=\"d3\")\n    with gr.Row():\n        with gr.Column(scale=1, min_width=600):\n            text1 = gr.Textbox(label=\"prompt 1\")\n            text2 = gr.Textbox(label=\"prompt 2\")\n            inbtw = gr.Button(\"Between\")\n            text4 = gr.Textbox(label=\"prompt 1\")\n            text5 = gr.Textbox(label=\"prompt 2\")\n        with gr.Column(scale=2, min_width=600):\n            img1 = gr.Image(\"images/cheetah.jpg\")\n            btn = gr.Button(\"Go\").style(full_width=True)\n\ndemo.launch()\n
\n\n

\n\n

See how the first column has two Textboxes arranged vertically. The second column has an Image and Button arranged vertically. Notice how the relative widths of the two columns are set by the scale parameter. The column with twice the scale value takes up twice the width.

\n\n

Learn more about Columns in the docs.

\n\n

Tabs and Accordions

\n\n

You can also create Tabs using the with gr.Tab('tab_name'): clause. Any component created inside of a with gr.Tab('tab_name'): context appears in that tab. Consecutive Tab clauses are grouped together so that a single tab can be selected at one time, and only the components within that Tab's context are shown.

\n\n

For example:

\n\n
import numpy as np\nimport gradio as gr\n\n\ndef flip_text(x):\n    return x[::-1]\n\n\ndef flip_image(x):\n    return np.fliplr(x)\n\n\nwith gr.Blocks() as demo:\n    gr.Markdown(\"Flip text or image files using this demo.\")\n    with gr.Tab(\"Flip Text\"):\n        text_input = gr.Textbox()\n        text_output = gr.Textbox()\n        text_button = gr.Button(\"Flip\")\n    with gr.Tab(\"Flip Image\"):\n        with gr.Row():\n            image_input = gr.Image()\n            image_output = gr.Image()\n        image_button = gr.Button(\"Flip\")\n\n    with gr.Accordion(\"Open for More!\"):\n        gr.Markdown(\"Look at me...\")\n\n    text_button.click(flip_text, inputs=text_input, outputs=text_output)\n    image_button.click(flip_image, inputs=image_input, outputs=image_output)\n\ndemo.launch()\n\n
\n\n

\n\n

Also note the gr.Accordion('label') in this example. The Accordion is a layout that can be toggled open or closed. Like Tabs, it is a layout element that can selectively hide or show content. Any components that are defined inside of a with gr.Accordion('label'): will be hidden or shown when the accordion's toggle icon is clicked.

\n\n

Learn more about Tabs and Accordions in the docs.

\n\n

Visibility

\n\n

Both Components and Layout elements have a visible argument that can be set initially and also updated using gr.update(). Setting gr.update(visible=...) on a Column can be used to show or hide a set of Components.

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    error_box = gr.Textbox(label=\"Error\", visible=False)\n\n    name_box = gr.Textbox(label=\"Name\")\n    age_box = gr.Number(label=\"Age\", minimum=0, maximum=100)\n    symptoms_box = gr.CheckboxGroup([\"Cough\", \"Fever\", \"Runny Nose\"])\n    submit_btn = gr.Button(\"Submit\")\n\n    with gr.Column(visible=False) as output_col:\n        diagnosis_box = gr.Textbox(label=\"Diagnosis\")\n        patient_summary_box = gr.Textbox(label=\"Patient Summary\")\n\n    def submit(name, age, symptoms):\n        if len(name) == 0:\n            return {error_box: gr.update(value=\"Enter name\", visible=True)}\n        return {\n            output_col: gr.update(visible=True),\n            diagnosis_box: \"covid\" if \"Cough\" in symptoms else \"flu\",\n            patient_summary_box: f\"{name}, {age} y/o\",\n        }\n\n    submit_btn.click(\n        submit,\n        [name_box, age_box, symptoms_box],\n        [error_box, diagnosis_box, patient_summary_box, output_col],\n    )\n\ndemo.launch()\n\n
\n\n

\n\n

Variable Number of Outputs

\n\n

By adjusting the visibility of components in a dynamic way, it is possible to create\ndemos with Gradio that support a variable number of outputs. Here's a very simple example\nwhere the number of output textboxes is controlled by an input slider:

\n\n
import gradio as gr\n\nmax_textboxes = 10\n\ndef variable_outputs(k):\n    k = int(k)\n    return [gr.Textbox.update(visible=True)]*k + [gr.Textbox.update(visible=False)]*(max_textboxes-k)\n\nwith gr.Blocks() as demo:\n    s = gr.Slider(1, max_textboxes, value=max_textboxes, step=1, label=\"How many textboxes to show:\")\n    textboxes = []\n    for i in range(max_textboxes):\n        t = gr.Textbox(f\"Textbox {i}\")\n        textboxes.append(t)\n\n    s.change(variable_outputs, s, textboxes)\n\nif __name__ == \"__main__\":\n   demo.launch()\n\n
\n\n

\n\n

Defining and Rendering Components Separately

\n\n

In some cases, you might want to define components before you actually render them in your UI. For instance, you might want to show an examples section using gr.Examples above the corresponding gr.Textbox input. Since gr.Examples requires the input component object as a parameter, you will need to define the input component first, and then render it later, after you have defined the gr.Examples object.

\n\n

The solution to this is to define the gr.Textbox outside of the gr.Blocks() scope and use the component's .render() method wherever you'd like it placed in the UI.

\n\n

Here's a full code example:

\n\n
input_textbox = gr.Textbox()\n\nwith gr.Blocks() as demo:\n    gr.Examples([\"hello\", \"bonjour\", \"merhaba\"], input_textbox)\n    input_textbox.render()\n
\n", "tags": [], "spaces": [], "url": "/guides/controlling-layout/", "contributor": null}, {"name": "state-in-blocks", "category": "building-with-blocks", "pretty_category": "Building With Blocks", "guide_index": 3, "absolute_index": 10, "pretty_name": "State In Blocks", "content": "# State in Blocks\n\nWe covered [State in Interfaces](https://gradio.app/interface-state), this guide takes a look at state in Blocks, which works mostly the same. \n\n## Global State\n\nGlobal state in Blocks works the same as in Interface. Any variable created outside a function call is a reference shared between all users.\n\n## Session State\n\nGradio supports session **state**, where data persists across multiple submits within a page session, in Blocks apps as well. To reiterate, session data is *not* shared between different users of your model. To store data in a session state, you need to do three things:\n\n1. Create a `gr.State()` object. If there is a default value to this stateful object, pass that into the constructor.\n2. In the event listener, put the `State` object as an input and output.\n3. In the event listener function, add the variable to the input parameters and the return value.\n\nLet's take a look at a game of hangman. \n\n```python\nimport gradio as gr\n\nsecret_word = \"gradio\"\n\nwith gr.Blocks() as demo: \n used_letters_var = gr.State([])\n with gr.Row() as row:\n with gr.Column():\n input_letter = gr.Textbox(label=\"Enter letter\")\n btn = gr.Button(\"Guess Letter\")\n with gr.Column():\n hangman = gr.Textbox(\n label=\"Hangman\",\n value=\"_\"*len(secret_word)\n )\n used_letters_box = gr.Textbox(label=\"Used Letters\")\n\n def guess_letter(letter, used_letters):\n used_letters.append(letter)\n answer = \"\".join([\n (letter if letter in used_letters else \"_\")\n for letter in secret_word\n ])\n return {\n used_letters_var: used_letters,\n used_letters_box: \", \".join(used_letters),\n hangman: answer\n }\n btn.click(\n guess_letter, \n [input_letter, used_letters_var],\n [used_letters_var, used_letters_box, hangman]\n )\ndemo.launch()\n```\n\n\nLet's see how we do each of the 3 steps listed above in this game:\n\n1. We store the used letters in `used_letters_var`. In the constructor of `State`, we set the initial value of this to `[]`, an empty list. \n2. In `btn.click()`, we have a reference to `used_letters_var` in both the inputs and outputs.\n3. In `guess_letter`, we pass the value of this `State` to `used_letters`, and then return an updated value of this `State` in the return statement.\n\nWith more complex apps, you will likely have many State variables storing session state in a single Blocks app.\n\nLearn more about `State` in the [docs](https://gradio.app/docs#state).\n\n\n\n", "html": "

State in Blocks

\n\n

We covered State in Interfaces; this guide takes a look at state in Blocks, which works mostly the same way.

\n\n

Global State

\n\n

Global state in Blocks works the same as in Interface. Any variable created outside a function call is a reference shared between all users.

\n\n
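
As a minimal sketch (the counter and greeting are illustrative assumptions, not part of the original guide), a module-level variable defined outside any function is shared by every user of the app:

\n\n
import gradio as gr\n\nvisit_count = 0  # defined outside any function, so shared across all users\n\ndef greet(name):\n    global visit_count\n    visit_count += 1\n    return f\"Hello {name}! This app has been used {visit_count} times.\"\n\nwith gr.Blocks() as demo:\n    name = gr.Textbox(label=\"Name\")\n    out = gr.Textbox(label=\"Greeting\")\n    gr.Button(\"Greet\").click(greet, name, out)\n\ndemo.launch()\n
\n\n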

Session State

\n\n

Gradio supports session state, where data persists across multiple submits within a page session, in Blocks apps as well. To reiterate, session data is not shared between different users of your model. To store data in a session state, you need to do three things:

\n\n
  1. Create a gr.State() object. If there is a default value to this stateful object, pass that into the constructor.
  2. In the event listener, put the State object as an input and output.
  3. In the event listener function, add the variable to the input parameters and the return value.
\n\n

Let's take a look at a game of hangman.

\n\n
import gradio as gr\n\nsecret_word = \"gradio\"\n\nwith gr.Blocks() as demo:    \n    used_letters_var = gr.State([])\n    with gr.Row() as row:\n        with gr.Column():\n            input_letter = gr.Textbox(label=\"Enter letter\")\n            btn = gr.Button(\"Guess Letter\")\n        with gr.Column():\n            hangman = gr.Textbox(\n                label=\"Hangman\",\n                value=\"_\"*len(secret_word)\n            )\n            used_letters_box = gr.Textbox(label=\"Used Letters\")\n\n    def guess_letter(letter, used_letters):\n        used_letters.append(letter)\n        answer = \"\".join([\n            (letter if letter in used_letters else \"_\")\n            for letter in secret_word\n        ])\n        return {\n            used_letters_var: used_letters,\n            used_letters_box: \", \".join(used_letters),\n            hangman: answer\n        }\n    btn.click(\n        guess_letter, \n        [input_letter, used_letters_var],\n        [used_letters_var, used_letters_box, hangman]\n        )\ndemo.launch()\n
\n\n

\n\n

Let's see how we do each of the 3 steps listed above in this game:

\n\n
  1. We store the used letters in used_letters_var. In the constructor of State, we set the initial value of this to [], an empty list.
  2. In btn.click(), we have a reference to used_letters_var in both the inputs and outputs.
  3. In guess_letter, we pass the value of this State to used_letters, and then return an updated value of this State in the return statement.
\n\n

With more complex apps, you will likely have many State variables storing session state in a single Blocks app.

\n\n

Learn more about State in the docs.

\n", "tags": [], "spaces": [], "url": "/guides/state-in-blocks/", "contributor": null}, {"name": "custom-CSS-and-JS", "category": "building-with-blocks", "pretty_category": "Building With Blocks", "guide_index": 4, "absolute_index": 11, "pretty_name": "Custom CSS And JS", "content": "# Custom JS and CSS\n\nThis guide covers how to style Blocks with more flexibility, as well as adding Javascript code to event listeners. \n\n**Warning**: The use of query selectors in custom JS and CSS is *not* guaranteed to work across Gradio versions as the Gradio HTML DOM may change. We recommend using query selectors sparingly.\n\n## Custom CSS\n\nGradio themes are the easiest way to customize the look and feel of your app. You can choose from a variety of themes, or create your own. To do so, pass the `theme=` kwarg to the `Blocks` constructor. For example:\n\n```python\nwith gr.Blocks(theme=gr.themes.Glass()):\n ...\n```\n\nGradio comes with a set of prebuilt themes which you can load from `gr.themes.*`. You can extend these themes or create your own themes from scratch - see the [Theming guide](/guides/theming-guide) for more details.\n\nFor additional styling ability, you can pass any CSS to your app using the `css=` kwarg.\n\nThe base class for the Gradio app is `gradio-container`, so here's an example that changes the background color of the Gradio app:\n```python\nwith gr.Blocks(css=\".gradio-container {background-color: red}\") as demo:\n ...\n```\n\nIf you'd like to reference external files in your css, preface the file path (which can be a relative or absolute path) with `\"file=\"`, for example:\n\n```python\nwith gr.Blocks(css=\".gradio-container {background: url('file=clouds.jpg')}\") as demo:\n ...\n```\n\nYou can also pass the filepath to a CSS file to the `css` argument. \n\n## The `elem_id` and `elem_classes` Arguments\n\nYou can `elem_id` to add an HTML element `id` to any component, and `elem_classes` to add a class or list of classes. This will allow you to select elements more easily with CSS. This approach is also more likely to be stable across Gradio versions as built-in class names or ids may change (however, as mentioned in the warning above, we cannot guarantee complete compatibility between Gradio versions if you use custom CSS as the DOM elements may themselves change).\n\n```python\ncss = \"\"\"\n#warning {background-color: #FFCCCB} \n.feedback textarea {font-size: 24px !important}\n\"\"\"\n\nwith gr.Blocks(css=css) as demo:\n box1 = gr.Textbox(value=\"Good Job\", elem_classes=\"feedback\")\n box2 = gr.Textbox(value=\"Failure\", elem_id=\"warning\", elem_classes=\"feedback\")\n```\n\nThe CSS `#warning` ruleset will only target the second Textbox, while the `.feedback` ruleset will target both. Note that when targeting classes, you might need to put the `!important` selector to override the default Gradio styles.\n\n## Custom JS\n\nEvent listeners have a `_js` argument that can take a Javascript function as a string and treat it just like a Python event listener function. You can pass both a Javascript function and a Python function (in which case the Javascript function is run first) or only Javascript (and set the Python `fn` to `None`). 
Take a look at the code below:\n\n```python\nimport gradio as gr\n\nblocks = gr.Blocks()\n\nwith blocks as demo:\n subject = gr.Textbox(placeholder=\"subject\")\n verb = gr.Radio([\"ate\", \"loved\", \"hated\"])\n object = gr.Textbox(placeholder=\"object\")\n\n with gr.Row():\n btn = gr.Button(\"Create sentence.\")\n reverse_btn = gr.Button(\"Reverse sentence.\")\n foo_bar_btn = gr.Button(\"Append foo\")\n reverse_then_to_the_server_btn = gr.Button(\n \"Reverse sentence and send to server.\"\n )\n\n def sentence_maker(w1, w2, w3):\n return f\"{w1} {w2} {w3}\"\n\n output1 = gr.Textbox(label=\"output 1\")\n output2 = gr.Textbox(label=\"verb\")\n output3 = gr.Textbox(label=\"verb reversed\")\n output4 = gr.Textbox(label=\"front end process and then send to backend\")\n\n btn.click(sentence_maker, [subject, verb, object], output1)\n reverse_btn.click(\n None, [subject, verb, object], output2, _js=\"(s, v, o) => o + ' ' + v + ' ' + s\"\n )\n verb.change(lambda x: x, verb, output3, _js=\"(x) => [...x].reverse().join('')\")\n foo_bar_btn.click(None, [], subject, _js=\"(x) => x + ' foo'\")\n\n reverse_then_to_the_server_btn.click(\n sentence_maker,\n [subject, verb, object],\n output4,\n _js=\"(s, v, o) => [s, v, o].map(x => [...x].reverse().join(''))\",\n )\n\ndemo.launch()\n\n```\n", "html": "

Custom JS and CSS

\n\n

This guide covers how to style Blocks with more flexibility, as well as adding Javascript code to event listeners.

\n\n

Warning: The use of query selectors in custom JS and CSS is not guaranteed to work across Gradio versions as the Gradio HTML DOM may change. We recommend using query selectors sparingly.

\n\n

Custom CSS

\n\n

Gradio themes are the easiest way to customize the look and feel of your app. You can choose from a variety of themes, or create your own. To do so, pass the theme= kwarg to the Blocks constructor. For example:

\n\n
with gr.Blocks(theme=gr.themes.Glass()):\n    ...\n
\n\n

Gradio comes with a set of prebuilt themes which you can load from gr.themes.*. You can extend these themes or create your own themes from scratch - see the Theming guide for more details.

\n\n

For additional styling ability, you can pass any CSS to your app using the css= kwarg.

\n\n

The base class for the Gradio app is gradio-container, so here's an example that changes the background color of the Gradio app:

\n\n
with gr.Blocks(css=\".gradio-container {background-color: red}\") as demo:\n    ...\n
\n\n

If you'd like to reference external files in your css, preface the file path (which can be a relative or absolute path) with \"file=\", for example:

\n\n
with gr.Blocks(css=\".gradio-container {background: url('file=clouds.jpg')}\") as demo:\n    ...\n
\n\n

You can also pass the filepath to a CSS file to the css argument.

\n\n
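
A minimal sketch, assuming a stylesheet named styles.css sits next to the app file (the filename is a hypothetical example):

\n\n
with gr.Blocks(css=\"styles.css\") as demo:\n    ...\n
\n\n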

The elem_id and elem_classes Arguments

\n\n

You can use elem_id to add an HTML element id to any component, and elem_classes to add a class or list of classes. This will allow you to select elements more easily with CSS. This approach is also more likely to be stable across Gradio versions as built-in class names or ids may change (however, as mentioned in the warning above, we cannot guarantee complete compatibility between Gradio versions if you use custom CSS as the DOM elements may themselves change).

\n\n
css = \"\"\"\n#warning {background-color: #FFCCCB} \n.feedback textarea {font-size: 24px !important}\n\"\"\"\n\nwith gr.Blocks(css=css) as demo:\n    box1 = gr.Textbox(value=\"Good Job\", elem_classes=\"feedback\")\n    box2 = gr.Textbox(value=\"Failure\", elem_id=\"warning\", elem_classes=\"feedback\")\n
\n\n

The CSS #warning ruleset will only target the second Textbox, while the .feedback ruleset will target both. Note that when targeting classes, you might need to add !important to override the default Gradio styles.

\n\n

Custom JS

\n\n

Event listeners have a _js argument that can take a Javascript function as a string and treat it just like a Python event listener function. You can pass both a Javascript function and a Python function (in which case the Javascript function is run first) or only Javascript (and set the Python fn to None). Take a look at the code below:

\n\n
import gradio as gr\n\nblocks = gr.Blocks()\n\nwith blocks as demo:\n    subject = gr.Textbox(placeholder=\"subject\")\n    verb = gr.Radio([\"ate\", \"loved\", \"hated\"])\n    object = gr.Textbox(placeholder=\"object\")\n\n    with gr.Row():\n        btn = gr.Button(\"Create sentence.\")\n        reverse_btn = gr.Button(\"Reverse sentence.\")\n        foo_bar_btn = gr.Button(\"Append foo\")\n        reverse_then_to_the_server_btn = gr.Button(\n            \"Reverse sentence and send to server.\"\n        )\n\n    def sentence_maker(w1, w2, w3):\n        return f\"{w1} {w2} {w3}\"\n\n    output1 = gr.Textbox(label=\"output 1\")\n    output2 = gr.Textbox(label=\"verb\")\n    output3 = gr.Textbox(label=\"verb reversed\")\n    output4 = gr.Textbox(label=\"front end process and then send to backend\")\n\n    btn.click(sentence_maker, [subject, verb, object], output1)\n    reverse_btn.click(\n        None, [subject, verb, object], output2, _js=\"(s, v, o) => o + ' ' + v + ' ' + s\"\n    )\n    verb.change(lambda x: x, verb, output3, _js=\"(x) => [...x].reverse().join('')\")\n    foo_bar_btn.click(None, [], subject, _js=\"(x) => x + ' foo'\")\n\n    reverse_then_to_the_server_btn.click(\n        sentence_maker,\n        [subject, verb, object],\n        output4,\n        _js=\"(s, v, o) => [s, v, o].map(x => [...x].reverse().join(''))\",\n    )\n\ndemo.launch()\n\n
\n\n

\n", "tags": [], "spaces": [], "url": "/guides/custom-CSS-and-JS/", "contributor": null}, {"name": "custom-interpretations-with-blocks", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 36, "pretty_name": "Custom Interpretations With Blocks", "content": "# Custom Machine Learning Interpretations with Blocks\n\n\n**Prerequisite**: This Guide requires you to know about Blocks and the interpretation feature of Interfaces.\nMake sure to [read the Guide to Blocks first](https://gradio.app/guides/quickstart/#blocks-more-flexibility-and-control) as well as the\ninterpretation section of the [Advanced Interface Features Guide](/advanced-interface-features#interpreting-your-predictions).\n\n## Introduction\n\nIf you have experience working with the Interface class, then you know that interpreting the prediction of your machine learning model\nis as easy as setting the `interpretation` parameter to either \"default\" or \"shap\".\n\nYou may be wondering if it is possible to add the same interpretation functionality to an app built with the Blocks API.\nNot only is it possible, but the flexibility of Blocks lets you display the interpretation output in ways that are\nimpossible to do with Interfaces!\n\nThis guide will show how to:\n\n1. Recreate the behavior of Interfaces's interpretation feature in a Blocks app.\n2. Customize how interpretations are displayed in a Blocks app.\n\nLet's get started!\n\n## Setting up the Blocks app\n\nLet's build a sentiment classification app with the Blocks API.\nThis app will take text as input and output the probability that this text expresses either negative or positive sentiment.\nWe'll have a single input `Textbox` and a single output `Label` component.\nBelow is the code for the app as well as the app itself.\n\n```python\nimport gradio as gr \nfrom transformers import pipeline\n\nsentiment_classifier = pipeline(\"text-classification\", return_all_scores=True)\n\ndef classifier(text):\n pred = sentiment_classifier(text)\n return {p[\"label\"]: p[\"score\"] for p in pred[0]}\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n input_text = gr.Textbox(label=\"Input Text\")\n with gr.Row():\n classify = gr.Button(\"Classify Sentiment\")\n with gr.Column():\n label = gr.Label(label=\"Predicted Sentiment\")\n\n classify.click(classifier, input_text, label)\ndemo.launch()\n```\n\n \n\n## Adding interpretations to the app\n\nOur goal is to present to our users how the words in the input contribute to the model's prediction.\nThis will help our users understand how the model works and also evaluate its effectiveness.\nFor example, we should expect our model to identify the words \"happy\" and \"love\" with positive sentiment - if not it's a sign we made a mistake in training it!\n\nFor each word in the input, we will compute a score of how much the model's prediction of positive sentiment is changed by that word.\nOnce we have those `(word, score)` pairs we can use gradio to visualize them for the user.\n\nThe [shap](https://shap.readthedocs.io/en/stable/index.html) library will help us compute the `(word, score)` pairs and\ngradio will take care of displaying the output to the user.\n\nThe following code computes the `(word, score)` pairs:\n\n```python\ndef interpretation_function(text):\n explainer = shap.Explainer(sentiment_classifier)\n shap_values = explainer([text])\n \n # Dimensions are (batch size, text size, number of classes)\n # Since we care about positive sentiment, use index 1\n 
scores = list(zip(shap_values.data[0], shap_values.values[0, :, 1]))\n # Scores contains (word, score) pairs\n \n \n # Format expected by gr.components.Interpretation\n return {\"original\": text, \"interpretation\": scores}\n```\n\nNow, all we have to do is add a button that runs this function when clicked.\nTo display the interpretation, we will use `gr.components.Interpretation`.\nThis will color each word in the input either red or blue.\nRed if it contributes to positive sentiment and blue if it contributes to negative sentiment.\nThis is how `Interface` displays the interpretation output for text.\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n input_text = gr.Textbox(label=\"Input Text\")\n with gr.Row():\n classify = gr.Button(\"Classify Sentiment\")\n interpret = gr.Button(\"Interpret\")\n with gr.Column():\n label = gr.Label(label=\"Predicted Sentiment\")\n with gr.Column():\n interpretation = gr.components.Interpretation(input_text)\n classify.click(classifier, input_text, label)\n interpret.click(interpretation_function, input_text, interpretation)\n\ndemo.launch()\n```\n\n \n\n\n## Customizing how the interpretation is displayed\n\nThe `gr.components.Interpretation` component does a good job of showing how individual words contribute to the sentiment prediction,\nbut what if we also wanted to display the score themselves along with the words?\n\nOne way to do this would be to generate a bar plot where the words are on the horizontal axis and the bar height corresponds\nto the shap score.\n\nWe can do this by modifying our `interpretation_function` to additionally return a matplotlib bar plot.\nWe will display it with the `gr.Plot` component in a separate tab.\n\nThis is how the interpretation function will look:\n```python\ndef interpretation_function(text):\n explainer = shap.Explainer(sentiment_classifier)\n shap_values = explainer([text])\n # Dimensions are (batch size, text size, number of classes)\n # Since we care about positive sentiment, use index 1\n scores = list(zip(shap_values.data[0], shap_values.values[0, :, 1]))\n\n scores_desc = sorted(scores, key=lambda t: t[1])[::-1]\n\n # Filter out empty string added by shap\n scores_desc = [t for t in scores_desc if t[0] != \"\"]\n\n fig_m = plt.figure()\n \n # Select top 5 words that contribute to positive sentiment\n plt.bar(x=[s[0] for s in scores_desc[:5]],\n height=[s[1] for s in scores_desc[:5]])\n plt.title(\"Top words contributing to positive sentiment\")\n plt.ylabel(\"Shap Value\")\n plt.xlabel(\"Word\")\n return {\"original\": text, \"interpretation\": scores}, fig_m\n```\n\nAnd this is how the app code will look:\n```python\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n input_text = gr.Textbox(label=\"Input Text\")\n with gr.Row():\n classify = gr.Button(\"Classify Sentiment\")\n interpret = gr.Button(\"Interpret\")\n with gr.Column():\n label = gr.Label(label=\"Predicted Sentiment\")\n with gr.Column():\n with gr.Tabs():\n with gr.TabItem(\"Display interpretation with built-in component\"):\n interpretation = gr.components.Interpretation(input_text)\n with gr.TabItem(\"Display interpretation with plot\"):\n interpretation_plot = gr.Plot()\n\n classify.click(classifier, input_text, label)\n interpret.click(interpretation_function, input_text, [interpretation, interpretation_plot])\n\ndemo.launch()\n```\n\nYou can see the demo below!\n\n \n\n## Beyond Sentiment Classification\nAlthough we have focused on sentiment classification so far, you can add 
interpretations to almost any machine learning model.\nThe output must be an `gr.Image` or `gr.Label` but the input can be almost anything (`gr.Number`, `gr.Slider`, `gr.Radio`, `gr.Image`).\n\nHere is a demo built with blocks of interpretations for an image classification model:\n\n \n\n\n## Closing remarks\n\nWe did a deep dive \ud83e\udd3f into how interpretations work and how you can add them to your Blocks app.\n\nWe also showed how the Blocks API gives you the power to control how the interpretation is visualized in your app.\n\nAdding interpretations is a helpful way to make your users understand and gain trust in your model.\nNow you have all the tools you need to add them to all of your apps!\n", "html": "

Custom Machine Learning Interpretations with Blocks

\n\n

Prerequisite: This Guide requires you to know about Blocks and the interpretation feature of Interfaces.\nMake sure to read the Guide to Blocks first as well as the\ninterpretation section of the Advanced Interface Features Guide.

\n\n

Introduction

\n\n

If you have experience working with the Interface class, then you know that interpreting the prediction of your machine learning model\nis as easy as setting the interpretation parameter to either \"default\" or \"shap\".

\n\n

You may be wondering if it is possible to add the same interpretation functionality to an app built with the Blocks API.\nNot only is it possible, but the flexibility of Blocks lets you display the interpretation output in ways that are\nimpossible to do with Interfaces!

\n\n

This guide will show how to:

\n\n
  1. Recreate the behavior of Interface's interpretation feature in a Blocks app.
  2. Customize how interpretations are displayed in a Blocks app.
\n\n

Let's get started!

\n\n

Setting up the Blocks app

\n\n

Let's build a sentiment classification app with the Blocks API.\nThis app will take text as input and output the probability that this text expresses either negative or positive sentiment.\nWe'll have a single input Textbox and a single output Label component.\nBelow is the code for the app as well as the app itself.

\n\n
import gradio as gr \nfrom transformers import pipeline\n\nsentiment_classifier = pipeline(\"text-classification\", return_all_scores=True)\n\ndef classifier(text):\n    pred = sentiment_classifier(text)\n    return {p[\"label\"]: p[\"score\"] for p in pred[0]}\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        with gr.Column():\n            input_text = gr.Textbox(label=\"Input Text\")\n            with gr.Row():\n                classify = gr.Button(\"Classify Sentiment\")\n        with gr.Column():\n            label = gr.Label(label=\"Predicted Sentiment\")\n\n    classify.click(classifier, input_text, label)\ndemo.launch()\n
\n\n

\n\n

Adding interpretations to the app

\n\n

Our goal is to present to our users how the words in the input contribute to the model's prediction.\nThis will help our users understand how the model works and also evaluate its effectiveness.\nFor example, we should expect our model to identify the words \"happy\" and \"love\" with positive sentiment - if not it's a sign we made a mistake in training it!

\n\n

For each word in the input, we will compute a score of how much the model's prediction of positive sentiment is changed by that word.\nOnce we have those (word, score) pairs we can use gradio to visualize them for the user.

\n\n

The shap library will help us compute the (word, score) pairs and\ngradio will take care of displaying the output to the user.

\n\n

The following code computes the (word, score) pairs:

\n\n
def interpretation_function(text):\n    explainer = shap.Explainer(sentiment_classifier)\n    shap_values = explainer([text])\n\n    # Dimensions are (batch size, text size, number of classes)\n    # Since we care about positive sentiment, use index 1\n    scores = list(zip(shap_values.data[0], shap_values.values[0, :, 1]))\n    # Scores contains (word, score) pairs\n\n\n    # Format expected by gr.components.Interpretation\n    return {\"original\": text, \"interpretation\": scores}\n
\n\n

Now, all we have to do is add a button that runs this function when clicked.\nTo display the interpretation, we will use gr.components.Interpretation.\nThis will color each word in the input either red or blue.\nRed if it contributes to positive sentiment and blue if it contributes to negative sentiment.\nThis is how Interface displays the interpretation output for text.

\n\n
with gr.Blocks() as demo:\n    with gr.Row():\n        with gr.Column():\n            input_text = gr.Textbox(label=\"Input Text\")\n            with gr.Row():\n                classify = gr.Button(\"Classify Sentiment\")\n                interpret = gr.Button(\"Interpret\")\n        with gr.Column():\n            label = gr.Label(label=\"Predicted Sentiment\")\n        with gr.Column():\n            interpretation = gr.components.Interpretation(input_text)\n    classify.click(classifier, input_text, label)\n    interpret.click(interpretation_function, input_text, interpretation)\n\ndemo.launch()\n
\n\n

\n\n

Customizing how the interpretation is displayed

\n\n

The gr.components.Interpretation component does a good job of showing how individual words contribute to the sentiment prediction,\nbut what if we also wanted to display the scores themselves along with the words?

\n\n

One way to do this would be to generate a bar plot where the words are on the horizontal axis and the bar height corresponds\nto the shap score.

\n\n

We can do this by modifying our interpretation_function to additionally return a matplotlib bar plot.\nWe will display it with the gr.Plot component in a separate tab.

\n\n

This is how the interpretation function will look:

\n\n
def interpretation_function(text):\n    explainer = shap.Explainer(sentiment_classifier)\n    shap_values = explainer([text])\n    # Dimensions are (batch size, text size, number of classes)\n    # Since we care about positive sentiment, use index 1\n    scores = list(zip(shap_values.data[0], shap_values.values[0, :, 1]))\n\n    scores_desc = sorted(scores, key=lambda t: t[1])[::-1]\n\n    # Filter out empty string added by shap\n    scores_desc = [t for t in scores_desc if t[0] != \"\"]\n\n    fig_m = plt.figure()\n\n    # Select top 5 words that contribute to positive sentiment\n    plt.bar(x=[s[0] for s in scores_desc[:5]],\n            height=[s[1] for s in scores_desc[:5]])\n    plt.title(\"Top words contributing to positive sentiment\")\n    plt.ylabel(\"Shap Value\")\n    plt.xlabel(\"Word\")\n    return {\"original\": text, \"interpretation\": scores}, fig_m\n
\n\n

And this is how the app code will look:

\n\n
with gr.Blocks() as demo:\n    with gr.Row():\n        with gr.Column():\n            input_text = gr.Textbox(label=\"Input Text\")\n            with gr.Row():\n                classify = gr.Button(\"Classify Sentiment\")\n                interpret = gr.Button(\"Interpret\")\n        with gr.Column():\n            label = gr.Label(label=\"Predicted Sentiment\")\n        with gr.Column():\n            with gr.Tabs():\n                with gr.TabItem(\"Display interpretation with built-in component\"):\n                    interpretation = gr.components.Interpretation(input_text)\n                with gr.TabItem(\"Display interpretation with plot\"):\n                    interpretation_plot = gr.Plot()\n\n    classify.click(classifier, input_text, label)\n    interpret.click(interpretation_function, input_text, [interpretation, interpretation_plot])\n\ndemo.launch()\n
\n\n

You can see the demo below!

\n\n

\n\n

Beyond Sentiment Classification

\n\n

Although we have focused on sentiment classification so far, you can add interpretations to almost any machine learning model.\nThe output must be a gr.Image or gr.Label but the input can be almost anything (gr.Number, gr.Slider, gr.Radio, gr.Image).

\n\n

Here is a demo, built with Blocks, of interpretations for an image classification model:

\n\n

\n\n

Closing remarks

\n\n

We did a deep dive \ud83e\udd3f into how interpretations work and how you can add them to your Blocks app.

\n\n

We also showed how the Blocks API gives you the power to control how the interpretation is visualized in your app.

\n\n

Adding interpretations is a great way to help your users understand and trust your model.\nNow you have all the tools you need to add them to all of your apps!

\n", "tags": ["INTERPRETATION", "SENTIMENT ANALYSIS"], "spaces": [], "url": "/guides/custom-interpretations-with-blocks/", "contributor": null}, {"name": "using-blocks-like-functions", "category": "building-with-blocks", "pretty_category": "Building With Blocks", "guide_index": 5, "absolute_index": 12, "pretty_name": "Using Blocks Like Functions", "content": "# Using Gradio Blocks Like Functions\n\n\n\n**Prerequisite**: This Guide builds on the Blocks Introduction. Make sure to [read that guide first](https://gradio.app/guides/quickstart/#blocks-more-flexibility-and-control).\n\n## Introduction\n\nDid you know that apart from being a full-stack machine learning demo, a Gradio Blocks app is also a regular-old python function!?\n\nThis means that if you have a gradio Blocks (or Interface) app called `demo`, you can use `demo` like you would any python function.\n\nSo doing something like `output = demo(\"Hello\", \"friend\")` will run the first event defined in `demo` on the inputs \"Hello\" and \"friend\" and store it\nin the variable `output`.\n\nIf I put you to sleep \ud83e\udd71, please bear with me! By using apps like functions, you can seamlessly compose Gradio apps.\nThe following section will show how.\n\n## Treating Blocks like functions\n\nLet's say we have the following demo that translates english text to german text. \n\n```python\nimport gradio as gr\n\nfrom transformers import pipeline\n\npipe = pipeline(\"translation\", model=\"t5-base\")\n\n\ndef translate(text):\n return pipe(text)[0][\"translation_text\"]\n\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n english = gr.Textbox(label=\"English text\")\n translate_btn = gr.Button(value=\"Translate\")\n with gr.Column():\n german = gr.Textbox(label=\"German Text\")\n\n translate_btn.click(translate, inputs=english, outputs=german, api_name=\"translate-to-german\")\n examples = gr.Examples(examples=[\"I went to the supermarket yesterday.\", \"Helen is a good swimmer.\"],\n inputs=[english])\n\ndemo.launch()\n```\n\nI already went ahead and hosted it in Hugging Face spaces at [gradio/english_translator](https://huggingface.co/spaces/gradio/english_translator).\n\nYou can see the demo below as well:\n\n\n\nNow, let's say you have an app that generates english text, but you wanted to additionally generate german text.\n\nYou could either:\n\n1. Copy the source code of my english-to-german translation and paste it in your app.\n\n2. 
Load my english-to-german translation in your app and treat it like a normal python function.\n\nOption 1 technically always works, but it often introduces unwanted complexity.\n\nOption 2 lets you borrow the functionality you want without tightly coupling our apps.\n\nAll you have to do is call the `Blocks.load` class method in your source file.\nAfter that, you can use my translation app like a regular python function!\n\nThe following code snippet and demo shows how to use `Blocks.load`.\n\nNote that the variable `english_translator` is my english to german app, but its used in `generate_text` like a regular function.\n\n```python\nimport gradio as gr\n\nfrom transformers import pipeline\n\nenglish_translator = gr.Blocks.load(name=\"spaces/gradio/english_translator\")\nenglish_generator = pipeline(\"text-generation\", model=\"distilgpt2\")\n\n\ndef generate_text(text):\n english_text = english_generator(text)[0][\"generated_text\"]\n german_text = english_translator(english_text)\n return english_text, german_text\n\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n seed = gr.Text(label=\"Input Phrase\")\n with gr.Column():\n english = gr.Text(label=\"Generated English Text\")\n german = gr.Text(label=\"Generated German Text\")\n btn = gr.Button(\"Generate\")\n btn.click(generate_text, inputs=[seed], outputs=[english, german])\n gr.Examples([\"My name is Clara and I am\"], inputs=[seed])\n\ndemo.launch()\n```\n\n\n\n## How to control which function in the app to use\n\nIf the app you are loading defines more than one function, you can specify which function to use\nwith the `fn_index` and `api_name` parameters.\n\nIn the code for our english to german demo, you'll see the following line:\n\n```python\ntranslate_btn.click(translate, inputs=english, outputs=german, api_name=\"translate-to-german\")\n```\n\nThe `api_name` gives this function a unique name in our app. You can use this name to tell gradio which\nfunction in the upstream space you want to use:\n\n```python\nenglish_generator(text, api_name=\"translate-to-german\")[0][\"generated_text\"]\n```\n\nYou can also use the `fn_index` parameter.\nImagine my app also defined an english to spanish translation function.\nIn order to use it in our text generation app, we would use the following code:\n\n```python\nenglish_generator(text, fn_index=1)[0][\"generated_text\"]\n```\n\nFunctions in gradio spaces are zero-indexed, so since the spanish translator would be the second function in my space,\nyou would use index 1. \n\n## Parting Remarks\n\nWe showed how treating a Blocks app like a regular python helps you compose functionality across different apps.\nAny Blocks app can be treated like a function, but a powerful pattern is to `load` an app hosted on \n[Hugging Face Spaces](https://huggingface.co/spaces) prior to treating it like a function in your own app.\nYou can also load models hosted on the [Hugging Face Model Hub](https://huggingface.co/models) - see the [Using Hugging Face Integrations](/using_hugging_face_integrations) guide for an example.\n\n### Happy building! \u2692\ufe0f\n", "html": "

Using Gradio Blocks Like Functions

\n\n

Prerequisite: This Guide builds on the Blocks Introduction. Make sure to read that guide first.

\n\n

Introduction

\n\n

Did you know that apart from being a full-stack machine learning demo, a Gradio Blocks app is also a regular-old python function!?

\n\n

This means that if you have a gradio Blocks (or Interface) app called demo, you can use demo like you would any python function.

\n\n

So doing something like output = demo(\"Hello\", \"friend\") will run the first event defined in demo on the inputs \"Hello\" and \"friend\" and store it\nin the variable output.

\n\n
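
As a quick illustrative sketch (the components and function here are made up just to demonstrate the idea), calling a Blocks app directly runs its first event:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    greeting = gr.Textbox()\n    name = gr.Textbox()\n    out = gr.Textbox()\n    btn = gr.Button(\"Join\")\n    btn.click(lambda g, n: f\"{g}, {n}!\", [greeting, name], out)\n\n# Used like a regular python function:\noutput = demo(\"Hello\", \"friend\")  # \"Hello, friend!\"\n
\n\n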

If I put you to sleep \ud83e\udd71, please bear with me! By using apps like functions, you can seamlessly compose Gradio apps.\nThe following section will show how.

\n\n

Treating Blocks like functions

\n\n

Let's say we have the following demo that translates english text to german text.

\n\n
import gradio as gr\n\nfrom transformers import pipeline\n\npipe = pipeline(\"translation\", model=\"t5-base\")\n\n\ndef translate(text):\n    return pipe(text)[0][\"translation_text\"]\n\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        with gr.Column():\n            english = gr.Textbox(label=\"English text\")\n            translate_btn = gr.Button(value=\"Translate\")\n        with gr.Column():\n            german = gr.Textbox(label=\"German Text\")\n\n    translate_btn.click(translate, inputs=english, outputs=german, api_name=\"translate-to-german\")\n    examples = gr.Examples(examples=[\"I went to the supermarket yesterday.\", \"Helen is a good swimmer.\"],\n                           inputs=[english])\n\ndemo.launch()\n
\n\n

I already went ahead and hosted it in Hugging Face spaces at gradio/english_translator.

\n\n

You can see the demo below as well:

\n\n

\n\n

Now, let's say you have an app that generates english text, but you wanted to additionally generate german text.

\n\n

You could either:

\n\n
  1. Copy the source code of my english-to-german translation and paste it in your app.

  2. Load my english-to-german translation in your app and treat it like a normal python function.
\n\n

Option 1 technically always works, but it often introduces unwanted complexity.

\n\n

Option 2 lets you borrow the functionality you want without tightly coupling our apps.

\n\n

All you have to do is call the Blocks.load class method in your source file.\nAfter that, you can use my translation app like a regular python function!

\n\n

The following code snippet and demo show how to use Blocks.load.

\n\n

Note that the variable english_translator is my english-to-german app, but it's used in generate_text like a regular function.

\n\n
import gradio as gr\n\nfrom transformers import pipeline\n\nenglish_translator = gr.Blocks.load(name=\"spaces/gradio/english_translator\")\nenglish_generator = pipeline(\"text-generation\", model=\"distilgpt2\")\n\n\ndef generate_text(text):\n    english_text = english_generator(text)[0][\"generated_text\"]\n    german_text = english_translator(english_text)\n    return english_text, german_text\n\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        with gr.Column():\n            seed = gr.Text(label=\"Input Phrase\")\n        with gr.Column():\n            english = gr.Text(label=\"Generated English Text\")\n            german = gr.Text(label=\"Generated German Text\")\n    btn = gr.Button(\"Generate\")\n    btn.click(generate_text, inputs=[seed], outputs=[english, german])\n    gr.Examples([\"My name is Clara and I am\"], inputs=[seed])\n\ndemo.launch()\n
\n\n

\n\n

How to control which function in the app to use

\n\n

If the app you are loading defines more than one function, you can specify which function to use\nwith the fn_index and api_name parameters.

\n\n

In the code for our english to german demo, you'll see the following line:

\n\n
translate_btn.click(translate, inputs=english, outputs=german, api_name=\"translate-to-german\")\n
\n\n

The api_name gives this function a unique name in our app. You can use this name to tell gradio which\nfunction in the upstream space you want to use:

\n\n
english_translator(text, api_name=\"translate-to-german\")\n
\n\n

You can also use the fn_index parameter.\nImagine my app also defined an english to spanish translation function.\nIn order to use it in our text generation app, we would use the following code:

\n\n
english_translator(text, fn_index=1)\n
\n\n

Functions in gradio spaces are zero-indexed, so since the spanish translator would be the second function in my space,\nyou would use index 1.

\n\n

Parting Remarks

\n\n

We showed how treating a Blocks app like a regular python function helps you compose functionality across different apps.\nAny Blocks app can be treated like a function, but a powerful pattern is to load an app hosted on \nHugging Face Spaces prior to treating it like a function in your own app.\nYou can also load models hosted on the Hugging Face Model Hub - see the Using Hugging Face Integrations guide for an example.

\n\n

Happy building! \u2692\ufe0f

\n", "tags": ["TRANSLATION", "HUB", "SPACES"], "spaces": [], "url": "/guides/using-blocks-like-functions/", "contributor": null}], "override_signature": "with gradio.Blocks():", "parent": "gradio"}, "changeable": {"class": null, "name": "Changeable", "description": "", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}], "returns": {"annotation": null}, "example": "", "fns": [{"fn": null, "name": "change", "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Changeable"}], "parent": "gradio"}, "inputable": {"class": null, "name": "Inputable", "description": "", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}], "returns": {"annotation": null}, "example": "", "fns": [{"fn": null, "name": "input", "description": "This listener is triggered when the user changes the value of the component. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. 
If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Inputable"}], "parent": "gradio"}, "clickable": {"class": null, "name": "Clickable", "description": "", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}], "returns": {"annotation": null}, "example": "", "fns": [{"fn": null, "name": "click", "description": "This listener is triggered when the component (e.g. a button) is clicked. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Clickable"}], "parent": "gradio"}, "submittable": {"class": null, "name": "Submittable", "description": "", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}], "returns": {"annotation": null}, "example": "", "fns": [{"fn": null, "name": "submit", "description": "This listener is triggered when the user presses the Enter key while the component (e.g. a textbox) is focused. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. 
Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Submittable"}], "parent": "gradio"}, "editable": {"class": null, "name": "Editable", "description": "", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}], "returns": {"annotation": null}, "example": "", "fns": [{"fn": null, "name": "edit", "description": "This listener is triggered when the user edits the component (e.g. image) using the built-in editor. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Editable"}], "parent": "gradio"}, "clearable": {"class": null, "name": "Clearable", "description": "", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}], "returns": {"annotation": null}, "example": "", "fns": [{"fn": null, "name": "clear", "description": "This listener is triggered when the user clears the component (e.g. image or audio) using the X button for the component. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. 
If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Clearable"}], "parent": "gradio"}, "playable": {"class": null, "name": "Playable", "description": "", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}], "returns": {"annotation": null}, "example": "", "fns": [{"fn": null, "name": "play", "description": "This listener is triggered when the user plays the component (e.g. audio or video). This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Playable"}, {"fn": null, "name": "pause", "description": "This listener is triggered when the media stops playing for any reason (e.g. audio or video). This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. 
If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Playable"}, {"fn": null, "name": "stop", "description": "This listener is triggered when the user reaches the end of the media track (e.g. audio or video). 
This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. 
Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Playable"}, {"fn": null, "name": "end", "description": "This listener is triggered when the user reaches the end of the media track (e.g. audio or video). This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Playable"}], "parent": "gradio"}, "streamable": {"class": null, "name": "Streamable", "description": "", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}], "returns": {"annotation": null}, "example": "", "fns": [{"fn": null, "name": "stream", "description": "This listener is triggered when the user streams the component (e.g. a live webcam component). This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. 
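The Playable listeners above (`play`, `pause`, `stop`, `end`) all share this signature and are attached like any other Blocks event. A minimal sketch, assuming an `Audio` component and placeholder status messages (not part of the documented payload):

```python
import gradio as gr

with gr.Blocks() as demo:
    audio = gr.Audio(label="Track")          # user uploads or records a clip
    status = gr.Textbox(label="Last event")

    # Playable listeners fire on the browser's media events.
    audio.play(lambda: "playing", outputs=status)
    audio.pause(lambda: "paused", outputs=status)
    audio.stop(lambda: "stopped", outputs=status)

demo.launch()
```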
If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Streamable"}], "parent": "gradio"}, "recordable": {"class": null, "name": "Recordable", "description": "", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}], "returns": {"annotation": null}, "example": "", "fns": [{"fn": null, "name": "start_recording", "description": "This listener is triggered when the user starts recording with the component (e.g. audio or video). This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Recordable"}, {"fn": null, "name": "stop_recording", "description": "This listener is triggered when the user stops recording with the component (e.g. audio or video). This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. 
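`stream` works the same way but fires repeatedly while the component is streaming. A hedged sketch, assuming a webcam `Image` source and a placeholder frame-processing function:

```python
import gradio as gr
import numpy as np

def mirror(frame):
    # Placeholder processing: flip each streamed webcam frame left-to-right.
    return None if frame is None else np.fliplr(frame)

with gr.Blocks() as demo:
    cam = gr.Image(source="webcam", streaming=True)
    out = gr.Image()
    cam.stream(mirror, inputs=cam, outputs=out)  # called on each new frame

demo.launch()
```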
If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Recordable"}], "parent": "gradio"}, "focusable": {"class": null, "name": "Focusable", "description": "", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}], "returns": {"annotation": null}, "example": "", "fns": [{"fn": null, "name": "focus", "description": "This listener is triggered when the component is focused (e.g. when the user clicks inside a textbox). This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Focusable"}, {"fn": null, "name": "blur", "description": "This listener is triggered when the component's is unfocused/blurred (e.g. when the user clicks outside of a textbox). This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
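`start_recording` and `stop_recording` pair naturally with a microphone `Audio` component. A minimal sketch; the status strings are illustrative:

```python
import gradio as gr

with gr.Blocks() as demo:
    mic = gr.Audio(source="microphone", label="Recorder")
    status = gr.Textbox(label="Recorder status")

    mic.start_recording(lambda: "recording...", outputs=status)
    mic.stop_recording(lambda: "stopped", outputs=status)

demo.launch()
```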
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Focusable"}], "parent": "gradio"}, "uploadable": {"class": null, "name": "Uploadable", "description": "", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}], "returns": {"annotation": null}, "example": "", "fns": [{"fn": null, "name": "upload", "description": "This listener is triggered when the user uploads a file into the component (e.g. when the user uploads a video into a video component). This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
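`focus` and `blur` are most useful on text-like inputs. A minimal sketch, assuming a `Textbox` and placeholder messages:

```python
import gradio as gr

with gr.Blocks() as demo:
    name = gr.Textbox(label="Name")
    status = gr.Textbox(label="Status")

    name.focus(lambda: "editing name...", outputs=status)
    name.blur(lambda: "done editing", outputs=status)

demo.launch()
```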
If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Uploadable"}], "parent": "gradio"}, "releaseable": {"class": null, "name": "Releaseable", "description": "", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}], "returns": {"annotation": null}, "example": "", "fns": [{"fn": null, "name": "release", "description": "This listener is triggered when the user releases the mouse on this component (e.g. when the user releases the slider). This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. 
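`upload` fires once a file lands in the component; with preprocessing enabled, the function receives the uploaded file object. A hedged sketch, assuming a `File` component and a placeholder handler:

```python
import gradio as gr

def describe(file):
    # Placeholder handler: report the temporary path of the uploaded file.
    return "" if file is None else file.name

with gr.Blocks() as demo:
    uploader = gr.File(label="Upload a file")
    path = gr.Textbox(label="Uploaded to")
    uploader.upload(describe, inputs=uploader, outputs=path)

demo.launch()
```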
Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Releaseable"}], "parent": "gradio"}, "selectable": {"class": null, "name": "Selectable", "description": "", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}], "returns": {"annotation": null}, "example": "", "fns": [{"fn": null, "name": "select", "description": "This listener is triggered when the user selects from within the Component. This event has EventData of type gradio.SelectData that carries information, accessible through SelectData.index and SelectData.value. See EventData documentation on how to use this event data.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Selectable"}], "parent": "gradio"}, "row": {"class": null, "name": "Row", "description": "Row is a layout element within Blocks that renders all children horizontally.", "tags": {"guides": "controlling-layout"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "variant", "annotation": "Literal['default', 'panel', 'compact']", "doc": "row type, 'default' (no background), 'panel' (gray background color and rounded corners), or 'compact' (rounded corners and no internal gap).", "default": "\"default\""}, {"name": "visible", "annotation": "bool", "doc": "If False, row will be hidden.", "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "equal_height", "annotation": "bool", "doc": "If True, makes every child element have equal height", "default": "True"}], "returns": {"annotation": null}, "example": "with gr.Blocks() as demo:\n with gr.Row():\n gr.Image(\"lion.jpg\", scale=2)\n gr.Image(\"tiger.jpg\", scale=1)\ndemo.launch()", "fns": [], "guides": [{"name": "controlling-layout", "category": "building-with-blocks", "pretty_category": "Building With Blocks", "guide_index": 2, "absolute_index": 9, "pretty_name": "Controlling Layout", "content": "# Controlling Layout\n\nBy default, Components in Blocks are arranged vertically. Let's take a look at how we can rearrange Components. Under the hood, this layout structure uses the [flexbox model of web development](https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Flexible_Box_Layout/Basic_Concepts_of_Flexbox).\n\n## Rows\n\nElements within a `with gr.Row` clause will all be displayed horizontally. For example, to display two Buttons side by side:\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row():\n btn1 = gr.Button(\"Button 1\")\n btn2 = gr.Button(\"Button 2\")\n```\n\nTo make every element in a Row have the same height, use the `equal_height` argument of the `style` method.\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row().style(equal_height=True):\n textbox = gr.Textbox()\n btn2 = gr.Button(\"Button 2\")\n```\n\nThe widths of elements in a Row can be controlled via a combination of `scale` and `min_width` arguments that are present in every Component.\n\n- `scale` is an integer that defines how an element will take up space in a Row. If scale is set to `0`, and element will not expand to take up space. 
If scale is set to `1` or greater, the element well expand. Multiple elements in a row will expand proportional to their scale. Below, `btn1` will expand twice as much as `btn2`, while `btn0` will not expand at all:\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row():\n btn0 = gr.Button(\"Button 0\", scale=0)\n btn1 = gr.Button(\"Button 1\", scale=1)\n btn2 = gr.Button(\"Button 2\", scale=2)\n```\n\n- `min_width` will set the minimum width the element will take. The Row will wrap if there isn't sufficient space to satisfy all `min_width` values.\n\nLearn more about Rows in the [docs](https://gradio.app/docs/#row).\n\n## Columns and Nesting\n\nComponents within a Column will be placed vertically atop each other. Since the vertical layout is the default layout for Blocks apps anyway, to be useful, Columns are usually nested within Rows. For example:\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n with gr.Row():\n text1 = gr.Textbox(label=\"t1\")\n slider2 = gr.Textbox(label=\"s2\")\n drop3 = gr.Dropdown([\"a\", \"b\", \"c\"], label=\"d3\")\n with gr.Row():\n with gr.Column(scale=1, min_width=600):\n text1 = gr.Textbox(label=\"prompt 1\")\n text2 = gr.Textbox(label=\"prompt 2\")\n inbtw = gr.Button(\"Between\")\n text4 = gr.Textbox(label=\"prompt 1\")\n text5 = gr.Textbox(label=\"prompt 2\")\n with gr.Column(scale=2, min_width=600):\n img1 = gr.Image(\"images/cheetah.jpg\")\n btn = gr.Button(\"Go\").style(full_width=True)\n\ndemo.launch()\n```\n\n\nSee how the first column has two Textboxes arranged vertically. The second column has an Image and Button arranged vertically. Notice how the relative widths of the two columns is set by the `scale` parameter. The column with twice the `scale` value takes up twice the width.\n\nLearn more about Columns in the [docs](https://gradio.app/docs/#column).\n\n## Tabs and Accordions\n\nYou can also create Tabs using the `with gr.Tab('tab_name'):` clause. Any component created inside of a `with gr.Tab('tab_name'):` context appears in that tab. Consecutive Tab clauses are grouped together so that a single tab can be selected at one time, and only the components within that Tab's context are shown.\n\nFor example:\n\n```python\nimport numpy as np\nimport gradio as gr\n\n\ndef flip_text(x):\n return x[::-1]\n\n\ndef flip_image(x):\n return np.fliplr(x)\n\n\nwith gr.Blocks() as demo:\n gr.Markdown(\"Flip text or image files using this demo.\")\n with gr.Tab(\"Flip Text\"):\n text_input = gr.Textbox()\n text_output = gr.Textbox()\n text_button = gr.Button(\"Flip\")\n with gr.Tab(\"Flip Image\"):\n with gr.Row():\n image_input = gr.Image()\n image_output = gr.Image()\n image_button = gr.Button(\"Flip\")\n\n with gr.Accordion(\"Open for More!\"):\n gr.Markdown(\"Look at me...\")\n\n text_button.click(flip_text, inputs=text_input, outputs=text_output)\n image_button.click(flip_image, inputs=image_input, outputs=image_output)\n\ndemo.launch()\n\n```\n\n\nAlso note the `gr.Accordion('label')` in this example. The Accordion is a layout that can be toggled open or closed. Like `Tabs`, it is a layout element that can selectively hide or show content. 
Any components that are defined inside of a `with gr.Accordion('label'):` will be hidden or shown when the accordion's toggle icon is clicked.\n\nLearn more about [Tabs](https://gradio.app/docs/#tab) and [Accordions](https://gradio.app/docs/#accordion) in the docs.\n\n## Visibility\n\nBoth Components and Layout elements have a `visible` argument that can set initially and also updated using `gr.update()`. Setting `gr.update(visible=...)` on a Column can be used to show or hide a set of Components.\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n error_box = gr.Textbox(label=\"Error\", visible=False)\n\n name_box = gr.Textbox(label=\"Name\")\n age_box = gr.Number(label=\"Age\", minimum=0, maximum=100)\n symptoms_box = gr.CheckboxGroup([\"Cough\", \"Fever\", \"Runny Nose\"])\n submit_btn = gr.Button(\"Submit\")\n\n with gr.Column(visible=False) as output_col:\n diagnosis_box = gr.Textbox(label=\"Diagnosis\")\n patient_summary_box = gr.Textbox(label=\"Patient Summary\")\n\n def submit(name, age, symptoms):\n if len(name) == 0:\n return {error_box: gr.update(value=\"Enter name\", visible=True)}\n return {\n output_col: gr.update(visible=True),\n diagnosis_box: \"covid\" if \"Cough\" in symptoms else \"flu\",\n patient_summary_box: f\"{name}, {age} y/o\",\n }\n\n submit_btn.click(\n submit,\n [name_box, age_box, symptoms_box],\n [error_box, diagnosis_box, patient_summary_box, output_col],\n )\n\ndemo.launch()\n\n```\n\n\n## Variable Number of Outputs\n\nBy adjusting the visibility of components in a dynamic way, it is possible to create\ndemos with Gradio that support a *variable numbers of outputs*. Here's a very simple example\nwhere the number of output textboxes is controlled by an input slider:\n\n```python\nimport gradio as gr\n\nmax_textboxes = 10\n\ndef variable_outputs(k):\n k = int(k)\n return [gr.Textbox.update(visible=True)]*k + [gr.Textbox.update(visible=False)]*(max_textboxes-k)\n\nwith gr.Blocks() as demo:\n s = gr.Slider(1, max_textboxes, value=max_textboxes, step=1, label=\"How many textboxes to show:\")\n textboxes = []\n for i in range(max_textboxes):\n t = gr.Textbox(f\"Textbox {i}\")\n textboxes.append(t)\n\n s.change(variable_outputs, s, textboxes)\n\nif __name__ == \"__main__\":\n demo.launch()\n\n```\n\n\n## Defining and Rendering Components Separately\n\nIn some cases, you might want to define components before you actually render them in your UI. For instance, you might want to show an examples section using `gr.Examples` above the corresponding `gr.Textbox` input. Since `gr.Examples` requires as a parameter the input component object, you will need to first define the input component, but then render it later, after you have defined the `gr.Examples` object.\n\nThe solution to this is to define the `gr.Textbox` outside of the `gr.Blocks()` scope and use the component's `.render()` method wherever you'd like it placed in the UI.\n\nHere's a full code example:\n\n```python\ninput_textbox = gr.Textbox()\n\nwith gr.Blocks() as demo:\n gr.Examples([\"hello\", \"bonjour\", \"merhaba\"], input_textbox)\n input_textbox.render()\n```\n", "html": "

Controlling Layout

\n\n

By default, Components in Blocks are arranged vertically. Let's take a look at how we can rearrange Components. Under the hood, this layout structure uses the flexbox model of web development.

\n\n

Rows

\n\n

Elements within a with gr.Row clause will all be displayed horizontally. For example, to display two Buttons side by side:

\n\n
with gr.Blocks() as demo:\n    with gr.Row():\n        btn1 = gr.Button(\"Button 1\")\n        btn2 = gr.Button(\"Button 2\")\n
\n\n

To make every element in a Row have the same height, use the equal_height argument of the style method.

\n\n
with gr.Blocks() as demo:\n    with gr.Row().style(equal_height=True):\n        textbox = gr.Textbox()\n        btn2 = gr.Button(\"Button 2\")\n
\n\n

The widths of elements in a Row can be controlled via a combination of scale and min_width arguments that are present in every Component.

\n\n
    \n
  • scale is an integer that defines how an element will take up space in a Row. If scale is set to 0, and element will not expand to take up space. If scale is set to 1 or greater, the element well expand. Multiple elements in a row will expand proportional to their scale. Below, btn1 will expand twice as much as btn2, while btn0 will not expand at all:
  • \n
\n\n
with gr.Blocks() as demo:\n    with gr.Row():\n        btn0 = gr.Button(\"Button 0\", scale=0)\n        btn1 = gr.Button(\"Button 1\", scale=1)\n        btn2 = gr.Button(\"Button 2\", scale=2)\n
\n\n
    \n
  • min_width will set the minimum width the element will take. The Row will wrap if there isn't sufficient space to satisfy all min_width values.
  • \n
\n\n

Learn more about Rows in the docs.

\n\n

Columns and Nesting

\n\n

Components within a Column will be placed vertically atop each other. Since the vertical layout is the default layout for Blocks apps anyway, to be useful, Columns are usually nested within Rows. For example:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        text1 = gr.Textbox(label=\"t1\")\n        slider2 = gr.Textbox(label=\"s2\")\n        drop3 = gr.Dropdown([\"a\", \"b\", \"c\"], label=\"d3\")\n    with gr.Row():\n        with gr.Column(scale=1, min_width=600):\n            text1 = gr.Textbox(label=\"prompt 1\")\n            text2 = gr.Textbox(label=\"prompt 2\")\n            inbtw = gr.Button(\"Between\")\n            text4 = gr.Textbox(label=\"prompt 1\")\n            text5 = gr.Textbox(label=\"prompt 2\")\n        with gr.Column(scale=2, min_width=600):\n            img1 = gr.Image(\"images/cheetah.jpg\")\n            btn = gr.Button(\"Go\").style(full_width=True)\n\ndemo.launch()\n
\n\n

\n\n

See how the first column has two Textboxes arranged vertically. The second column has an Image and Button arranged vertically. Notice how the relative widths of the two columns is set by the scale parameter. The column with twice the scale value takes up twice the width.

\n\n

Learn more about Columns in the docs.

\n\n

Tabs and Accordions

\n\n

You can also create Tabs using the with gr.Tab('tab_name'): clause. Any component created inside of a with gr.Tab('tab_name'): context appears in that tab. Consecutive Tab clauses are grouped together so that a single tab can be selected at one time, and only the components within that Tab's context are shown.

\n\n

For example:

\n\n
import numpy as np\nimport gradio as gr\n\n\ndef flip_text(x):\n    return x[::-1]\n\n\ndef flip_image(x):\n    return np.fliplr(x)\n\n\nwith gr.Blocks() as demo:\n    gr.Markdown(\"Flip text or image files using this demo.\")\n    with gr.Tab(\"Flip Text\"):\n        text_input = gr.Textbox()\n        text_output = gr.Textbox()\n        text_button = gr.Button(\"Flip\")\n    with gr.Tab(\"Flip Image\"):\n        with gr.Row():\n            image_input = gr.Image()\n            image_output = gr.Image()\n        image_button = gr.Button(\"Flip\")\n\n    with gr.Accordion(\"Open for More!\"):\n        gr.Markdown(\"Look at me...\")\n\n    text_button.click(flip_text, inputs=text_input, outputs=text_output)\n    image_button.click(flip_image, inputs=image_input, outputs=image_output)\n\ndemo.launch()\n\n
\n\n

\n\n

Also note the gr.Accordion('label') in this example. The Accordion is a layout that can be toggled open or closed. Like Tabs, it is a layout element that can selectively hide or show content. Any components that are defined inside of a with gr.Accordion('label'): will be hidden or shown when the accordion's toggle icon is clicked.

\n\n
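As an aside, the Accordion also accepts an open parameter (documented further down this page). A minimal sketch of an accordion that starts collapsed, with hypothetical contents, might look like:

```python
import gradio as gr

with gr.Blocks() as demo:
    # open=False makes the accordion start collapsed; clicking its
    # toggle icon reveals the components defined inside it.
    with gr.Accordion("Advanced options", open=False):
        gr.Slider(0, 1, value=0.7, label="Temperature")
        gr.Checkbox(label="Stream output")

demo.launch()
```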

Learn more about Tabs and Accordions in the docs.

\n\n

Visibility

\n\n

Both Components and Layout elements have a visible argument that can be set initially and also updated using gr.update(). Setting gr.update(visible=...) on a Column can be used to show or hide a set of Components.

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    error_box = gr.Textbox(label=\"Error\", visible=False)\n\n    name_box = gr.Textbox(label=\"Name\")\n    age_box = gr.Number(label=\"Age\", minimum=0, maximum=100)\n    symptoms_box = gr.CheckboxGroup([\"Cough\", \"Fever\", \"Runny Nose\"])\n    submit_btn = gr.Button(\"Submit\")\n\n    with gr.Column(visible=False) as output_col:\n        diagnosis_box = gr.Textbox(label=\"Diagnosis\")\n        patient_summary_box = gr.Textbox(label=\"Patient Summary\")\n\n    def submit(name, age, symptoms):\n        if len(name) == 0:\n            return {error_box: gr.update(value=\"Enter name\", visible=True)}\n        return {\n            output_col: gr.update(visible=True),\n            diagnosis_box: \"covid\" if \"Cough\" in symptoms else \"flu\",\n            patient_summary_box: f\"{name}, {age} y/o\",\n        }\n\n    submit_btn.click(\n        submit,\n        [name_box, age_box, symptoms_box],\n        [error_box, diagnosis_box, patient_summary_box, output_col],\n    )\n\ndemo.launch()\n\n
\n\n

\n\n

Variable Number of Outputs

\n\n

By adjusting the visibility of components in a dynamic way, it is possible to create\ndemos with Gradio that support a variable number of outputs. Here's a very simple example\nwhere the number of output textboxes is controlled by an input slider:

\n\n
import gradio as gr\n\nmax_textboxes = 10\n\ndef variable_outputs(k):\n    k = int(k)\n    return [gr.Textbox.update(visible=True)]*k + [gr.Textbox.update(visible=False)]*(max_textboxes-k)\n\nwith gr.Blocks() as demo:\n    s = gr.Slider(1, max_textboxes, value=max_textboxes, step=1, label=\"How many textboxes to show:\")\n    textboxes = []\n    for i in range(max_textboxes):\n        t = gr.Textbox(f\"Textbox {i}\")\n        textboxes.append(t)\n\n    s.change(variable_outputs, s, textboxes)\n\nif __name__ == \"__main__\":\n   demo.launch()\n\n
\n\n

\n\n

Defining and Rendering Components Separately

\n\n

In some cases, you might want to define components before you actually render them in your UI. For instance, you might want to show an examples section using gr.Examples above the corresponding gr.Textbox input. Since gr.Examples requires the input component object as a parameter, you will need to define the input component first, and then render it later, after you have defined the gr.Examples object.

\n\n

The solution to this is to define the gr.Textbox outside of the gr.Blocks() scope and use the component's .render() method wherever you'd like it placed in the UI.

\n\n

Here's a full code example:

\n\n
input_textbox = gr.Textbox()\n\nwith gr.Blocks() as demo:\n    gr.Examples([\"hello\", \"bonjour\", \"merhaba\"], input_textbox)\n    input_textbox.render()\n
\n", "tags": [], "spaces": [], "url": "/guides/controlling-layout/", "contributor": null}], "override_signature": "with gradio.Row():", "parent": "gradio"}, "column": {"class": null, "name": "Column", "description": "Column is a layout element within Blocks that renders all children vertically. The widths of columns can be set through the `scale` and `min_width` parameters. If a certain scale results in a column narrower than min_width, the min_width parameter will win.", "tags": {"guides": "controlling-layout"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "scale", "annotation": "int", "doc": "relative width compared to adjacent Columns. For example, if Column A has scale=2, and Column B has scale=1, A will be twice as wide as B.", "default": "1"}, {"name": "min_width", "annotation": "int", "doc": "minimum pixel width of Column, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in a column narrower than min_width, the min_width parameter will be respected first.", "default": "320"}, {"name": "variant", "annotation": "Literal['default', 'panel', 'compact']", "doc": "column type, 'default' (no background), 'panel' (gray background color and rounded corners), or 'compact' (rounded corners and no internal gap).", "default": "\"default\""}, {"name": "visible", "annotation": "bool", "doc": "If False, column will be hidden.", "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}], "returns": {"annotation": null}, "example": "with gr.Blocks() as demo:\n with gr.Row():\n with gr.Column(scale=1):\n text1 = gr.Textbox()\n text2 = gr.Textbox()\n with gr.Column(scale=4):\n btn1 = gr.Button(\"Button 1\")\n btn2 = gr.Button(\"Button 2\")", "fns": [], "guides": [{"name": "controlling-layout", "category": "building-with-blocks", "pretty_category": "Building With Blocks", "guide_index": 2, "absolute_index": 9, "pretty_name": "Controlling Layout", "content": "# Controlling Layout\n\nBy default, Components in Blocks are arranged vertically. Let's take a look at how we can rearrange Components. Under the hood, this layout structure uses the [flexbox model of web development](https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Flexible_Box_Layout/Basic_Concepts_of_Flexbox).\n\n## Rows\n\nElements within a `with gr.Row` clause will all be displayed horizontally. For example, to display two Buttons side by side:\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row():\n btn1 = gr.Button(\"Button 1\")\n btn2 = gr.Button(\"Button 2\")\n```\n\nTo make every element in a Row have the same height, use the `equal_height` argument of the `style` method.\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row().style(equal_height=True):\n textbox = gr.Textbox()\n btn2 = gr.Button(\"Button 2\")\n```\n\nThe widths of elements in a Row can be controlled via a combination of `scale` and `min_width` arguments that are present in every Component.\n\n- `scale` is an integer that defines how an element will take up space in a Row. If scale is set to `0`, and element will not expand to take up space. If scale is set to `1` or greater, the element well expand. Multiple elements in a row will expand proportional to their scale. 
Below, `btn1` will expand twice as much as `btn2`, while `btn0` will not expand at all:\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row():\n btn0 = gr.Button(\"Button 0\", scale=0)\n btn1 = gr.Button(\"Button 1\", scale=1)\n btn2 = gr.Button(\"Button 2\", scale=2)\n```\n\n- `min_width` will set the minimum width the element will take. The Row will wrap if there isn't sufficient space to satisfy all `min_width` values.\n\nLearn more about Rows in the [docs](https://gradio.app/docs/#row).\n\n## Columns and Nesting\n\nComponents within a Column will be placed vertically atop each other. Since the vertical layout is the default layout for Blocks apps anyway, to be useful, Columns are usually nested within Rows. For example:\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n with gr.Row():\n text1 = gr.Textbox(label=\"t1\")\n slider2 = gr.Textbox(label=\"s2\")\n drop3 = gr.Dropdown([\"a\", \"b\", \"c\"], label=\"d3\")\n with gr.Row():\n with gr.Column(scale=1, min_width=600):\n text1 = gr.Textbox(label=\"prompt 1\")\n text2 = gr.Textbox(label=\"prompt 2\")\n inbtw = gr.Button(\"Between\")\n text4 = gr.Textbox(label=\"prompt 1\")\n text5 = gr.Textbox(label=\"prompt 2\")\n with gr.Column(scale=2, min_width=600):\n img1 = gr.Image(\"images/cheetah.jpg\")\n btn = gr.Button(\"Go\").style(full_width=True)\n\ndemo.launch()\n```\n\n\nSee how the first column has two Textboxes arranged vertically. The second column has an Image and Button arranged vertically. Notice how the relative widths of the two columns is set by the `scale` parameter. The column with twice the `scale` value takes up twice the width.\n\nLearn more about Columns in the [docs](https://gradio.app/docs/#column).\n\n## Tabs and Accordions\n\nYou can also create Tabs using the `with gr.Tab('tab_name'):` clause. Any component created inside of a `with gr.Tab('tab_name'):` context appears in that tab. Consecutive Tab clauses are grouped together so that a single tab can be selected at one time, and only the components within that Tab's context are shown.\n\nFor example:\n\n```python\nimport numpy as np\nimport gradio as gr\n\n\ndef flip_text(x):\n return x[::-1]\n\n\ndef flip_image(x):\n return np.fliplr(x)\n\n\nwith gr.Blocks() as demo:\n gr.Markdown(\"Flip text or image files using this demo.\")\n with gr.Tab(\"Flip Text\"):\n text_input = gr.Textbox()\n text_output = gr.Textbox()\n text_button = gr.Button(\"Flip\")\n with gr.Tab(\"Flip Image\"):\n with gr.Row():\n image_input = gr.Image()\n image_output = gr.Image()\n image_button = gr.Button(\"Flip\")\n\n with gr.Accordion(\"Open for More!\"):\n gr.Markdown(\"Look at me...\")\n\n text_button.click(flip_text, inputs=text_input, outputs=text_output)\n image_button.click(flip_image, inputs=image_input, outputs=image_output)\n\ndemo.launch()\n\n```\n\n\nAlso note the `gr.Accordion('label')` in this example. The Accordion is a layout that can be toggled open or closed. Like `Tabs`, it is a layout element that can selectively hide or show content. Any components that are defined inside of a `with gr.Accordion('label'):` will be hidden or shown when the accordion's toggle icon is clicked.\n\nLearn more about [Tabs](https://gradio.app/docs/#tab) and [Accordions](https://gradio.app/docs/#accordion) in the docs.\n\n## Visibility\n\nBoth Components and Layout elements have a `visible` argument that can set initially and also updated using `gr.update()`. 
Setting `gr.update(visible=...)` on a Column can be used to show or hide a set of Components.\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n error_box = gr.Textbox(label=\"Error\", visible=False)\n\n name_box = gr.Textbox(label=\"Name\")\n age_box = gr.Number(label=\"Age\", minimum=0, maximum=100)\n symptoms_box = gr.CheckboxGroup([\"Cough\", \"Fever\", \"Runny Nose\"])\n submit_btn = gr.Button(\"Submit\")\n\n with gr.Column(visible=False) as output_col:\n diagnosis_box = gr.Textbox(label=\"Diagnosis\")\n patient_summary_box = gr.Textbox(label=\"Patient Summary\")\n\n def submit(name, age, symptoms):\n if len(name) == 0:\n return {error_box: gr.update(value=\"Enter name\", visible=True)}\n return {\n output_col: gr.update(visible=True),\n diagnosis_box: \"covid\" if \"Cough\" in symptoms else \"flu\",\n patient_summary_box: f\"{name}, {age} y/o\",\n }\n\n submit_btn.click(\n submit,\n [name_box, age_box, symptoms_box],\n [error_box, diagnosis_box, patient_summary_box, output_col],\n )\n\ndemo.launch()\n\n```\n\n\n## Variable Number of Outputs\n\nBy adjusting the visibility of components in a dynamic way, it is possible to create\ndemos with Gradio that support a *variable numbers of outputs*. Here's a very simple example\nwhere the number of output textboxes is controlled by an input slider:\n\n```python\nimport gradio as gr\n\nmax_textboxes = 10\n\ndef variable_outputs(k):\n k = int(k)\n return [gr.Textbox.update(visible=True)]*k + [gr.Textbox.update(visible=False)]*(max_textboxes-k)\n\nwith gr.Blocks() as demo:\n s = gr.Slider(1, max_textboxes, value=max_textboxes, step=1, label=\"How many textboxes to show:\")\n textboxes = []\n for i in range(max_textboxes):\n t = gr.Textbox(f\"Textbox {i}\")\n textboxes.append(t)\n\n s.change(variable_outputs, s, textboxes)\n\nif __name__ == \"__main__\":\n demo.launch()\n\n```\n\n\n## Defining and Rendering Components Separately\n\nIn some cases, you might want to define components before you actually render them in your UI. For instance, you might want to show an examples section using `gr.Examples` above the corresponding `gr.Textbox` input. Since `gr.Examples` requires as a parameter the input component object, you will need to first define the input component, but then render it later, after you have defined the `gr.Examples` object.\n\nThe solution to this is to define the `gr.Textbox` outside of the `gr.Blocks()` scope and use the component's `.render()` method wherever you'd like it placed in the UI.\n\nHere's a full code example:\n\n```python\ninput_textbox = gr.Textbox()\n\nwith gr.Blocks() as demo:\n gr.Examples([\"hello\", \"bonjour\", \"merhaba\"], input_textbox)\n input_textbox.render()\n```\n", "html": "

Controlling Layout

\n\n

By default, Components in Blocks are arranged vertically. Let's take a look at how we can rearrange Components. Under the hood, this layout structure uses the flexbox model of web development.

\n\n

Rows

\n\n

Elements within a with gr.Row clause will all be displayed horizontally. For example, to display two Buttons side by side:

\n\n
with gr.Blocks() as demo:\n    with gr.Row():\n        btn1 = gr.Button(\"Button 1\")\n        btn2 = gr.Button(\"Button 2\")\n
\n\n

To make every element in a Row have the same height, use the equal_height argument of the style method.

\n\n
with gr.Blocks() as demo:\n    with gr.Row().style(equal_height=True):\n        textbox = gr.Textbox()\n        btn2 = gr.Button(\"Button 2\")\n
\n\n

The widths of elements in a Row can be controlled via a combination of scale and min_width arguments that are present in every Component.

\n\n
    \n
  • scale is an integer that defines how an element will take up space in a Row. If scale is set to 0, the element will not expand to take up space. If scale is set to 1 or greater, the element will expand. Multiple elements in a row will expand in proportion to their scale. Below, btn1 will expand twice as much as btn2, while btn0 will not expand at all:
  • \n
\n\n
with gr.Blocks() as demo:\n    with gr.Row():\n        btn0 = gr.Button(\"Button 0\", scale=0)\n        btn1 = gr.Button(\"Button 1\", scale=1)\n        btn2 = gr.Button(\"Button 2\", scale=2)\n
\n\n
    \n
  • min_width will set the minimum width the element will take. The Row will wrap if there isn't sufficient space to satisfy all min_width values.
  • \n
\n\n

Learn more about Rows in the docs.

\n\n

Columns and Nesting

\n\n

Components within a Column will be placed vertically atop each other. Since the vertical layout is the default layout for Blocks apps anyway, to be useful, Columns are usually nested within Rows. For example:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        text1 = gr.Textbox(label=\"t1\")\n        slider2 = gr.Textbox(label=\"s2\")\n        drop3 = gr.Dropdown([\"a\", \"b\", \"c\"], label=\"d3\")\n    with gr.Row():\n        with gr.Column(scale=1, min_width=600):\n            text1 = gr.Textbox(label=\"prompt 1\")\n            text2 = gr.Textbox(label=\"prompt 2\")\n            inbtw = gr.Button(\"Between\")\n            text4 = gr.Textbox(label=\"prompt 1\")\n            text5 = gr.Textbox(label=\"prompt 2\")\n        with gr.Column(scale=2, min_width=600):\n            img1 = gr.Image(\"images/cheetah.jpg\")\n            btn = gr.Button(\"Go\").style(full_width=True)\n\ndemo.launch()\n
\n\n

\n\n

See how the first column has two Textboxes arranged vertically. The second column has an Image and Button arranged vertically. Notice how the relative widths of the two columns are set by the scale parameter. The column with twice the scale value takes up twice the width.

\n\n

Learn more about Columns in the docs.

\n\n

Tabs and Accordions

\n\n

You can also create Tabs using the with gr.Tab('tab_name'): clause. Any component created inside of a with gr.Tab('tab_name'): context appears in that tab. Consecutive Tab clauses are grouped together so that a single tab can be selected at one time, and only the components within that Tab's context are shown.

\n\n

For example:

\n\n
import numpy as np\nimport gradio as gr\n\n\ndef flip_text(x):\n    return x[::-1]\n\n\ndef flip_image(x):\n    return np.fliplr(x)\n\n\nwith gr.Blocks() as demo:\n    gr.Markdown(\"Flip text or image files using this demo.\")\n    with gr.Tab(\"Flip Text\"):\n        text_input = gr.Textbox()\n        text_output = gr.Textbox()\n        text_button = gr.Button(\"Flip\")\n    with gr.Tab(\"Flip Image\"):\n        with gr.Row():\n            image_input = gr.Image()\n            image_output = gr.Image()\n        image_button = gr.Button(\"Flip\")\n\n    with gr.Accordion(\"Open for More!\"):\n        gr.Markdown(\"Look at me...\")\n\n    text_button.click(flip_text, inputs=text_input, outputs=text_output)\n    image_button.click(flip_image, inputs=image_input, outputs=image_output)\n\ndemo.launch()\n\n
\n\n

\n\n

Also note the gr.Accordion('label') in this example. The Accordion is a layout that can be toggled open or closed. Like Tabs, it is a layout element that can selectively hide or show content. Any components that are defined inside of a with gr.Accordion('label'): will be hidden or shown when the accordion's toggle icon is clicked.

\n\n

Learn more about Tabs and Accordions in the docs.

\n\n

Visibility

\n\n

Both Components and Layout elements have a visible argument that can be set initially and also updated using gr.update(). Setting gr.update(visible=...) on a Column can be used to show or hide a set of Components.

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    error_box = gr.Textbox(label=\"Error\", visible=False)\n\n    name_box = gr.Textbox(label=\"Name\")\n    age_box = gr.Number(label=\"Age\", minimum=0, maximum=100)\n    symptoms_box = gr.CheckboxGroup([\"Cough\", \"Fever\", \"Runny Nose\"])\n    submit_btn = gr.Button(\"Submit\")\n\n    with gr.Column(visible=False) as output_col:\n        diagnosis_box = gr.Textbox(label=\"Diagnosis\")\n        patient_summary_box = gr.Textbox(label=\"Patient Summary\")\n\n    def submit(name, age, symptoms):\n        if len(name) == 0:\n            return {error_box: gr.update(value=\"Enter name\", visible=True)}\n        return {\n            output_col: gr.update(visible=True),\n            diagnosis_box: \"covid\" if \"Cough\" in symptoms else \"flu\",\n            patient_summary_box: f\"{name}, {age} y/o\",\n        }\n\n    submit_btn.click(\n        submit,\n        [name_box, age_box, symptoms_box],\n        [error_box, diagnosis_box, patient_summary_box, output_col],\n    )\n\ndemo.launch()\n\n
\n\n

\n\n

Variable Number of Outputs

\n\n

By adjusting the visibility of components in a dynamic way, it is possible to create\ndemos with Gradio that support a variable number of outputs. Here's a very simple example\nwhere the number of output textboxes is controlled by an input slider:

\n\n
import gradio as gr\n\nmax_textboxes = 10\n\ndef variable_outputs(k):\n    k = int(k)\n    return [gr.Textbox.update(visible=True)]*k + [gr.Textbox.update(visible=False)]*(max_textboxes-k)\n\nwith gr.Blocks() as demo:\n    s = gr.Slider(1, max_textboxes, value=max_textboxes, step=1, label=\"How many textboxes to show:\")\n    textboxes = []\n    for i in range(max_textboxes):\n        t = gr.Textbox(f\"Textbox {i}\")\n        textboxes.append(t)\n\n    s.change(variable_outputs, s, textboxes)\n\nif __name__ == \"__main__\":\n   demo.launch()\n\n
\n\n

\n\n

Defining and Rendering Components Separately

\n\n

In some cases, you might want to define components before you actually render them in your UI. For instance, you might want to show an examples section using gr.Examples above the corresponding gr.Textbox input. Since gr.Examples requires the input component object as a parameter, you will need to define the input component first, and then render it later, after you have defined the gr.Examples object.

\n\n

The solution to this is to define the gr.Textbox outside of the gr.Blocks() scope and use the component's .render() method wherever you'd like it placed in the UI.

\n\n

Here's a full code example:

\n\n
input_textbox = gr.Textbox()\n\nwith gr.Blocks() as demo:\n    gr.Examples([\"hello\", \"bonjour\", \"merhaba\"], input_textbox)\n    input_textbox.render()\n
\n", "tags": [], "spaces": [], "url": "/guides/controlling-layout/", "contributor": null}], "override_signature": "with gradio.Column():", "parent": "gradio"}, "tab": {"class": null, "name": "Tab", "description": "Tab (or its alias TabItem) is a layout element. Components defined within the Tab will be visible when this tab is selected tab.", "tags": {"guides": "controlling-layout"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "label", "annotation": "str", "doc": "The visual label for the tab"}, {"name": "id", "annotation": "int | str | None", "doc": "An optional identifier for the tab, required if you wish to control the selected tab from a predict function.", "default": "None"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}], "returns": {"annotation": null}, "example": "with gr.Blocks() as demo:\n with gr.Tab(\"Lion\"):\n gr.Image(\"lion.jpg\")\n gr.Button(\"New Lion\")\n with gr.Tab(\"Tiger\"):\n gr.Image(\"tiger.jpg\")\n gr.Button(\"New Tiger\")", "fns": [{"fn": null, "name": "select", "description": "This listener is triggered when the user selects from within the Component. This event has EventData of type gradio.SelectData that carries information, accessible through SelectData.index and SelectData.value. See EventData documentation on how to use this event data.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. 
If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Tab"}], "guides": [{"name": "controlling-layout", "category": "building-with-blocks", "pretty_category": "Building With Blocks", "guide_index": 2, "absolute_index": 9, "pretty_name": "Controlling Layout", "content": "# Controlling Layout\n\nBy default, Components in Blocks are arranged vertically. Let's take a look at how we can rearrange Components. Under the hood, this layout structure uses the [flexbox model of web development](https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Flexible_Box_Layout/Basic_Concepts_of_Flexbox).\n\n## Rows\n\nElements within a `with gr.Row` clause will all be displayed horizontally. For example, to display two Buttons side by side:\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row():\n btn1 = gr.Button(\"Button 1\")\n btn2 = gr.Button(\"Button 2\")\n```\n\nTo make every element in a Row have the same height, use the `equal_height` argument of the `style` method.\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row().style(equal_height=True):\n textbox = gr.Textbox()\n btn2 = gr.Button(\"Button 2\")\n```\n\nThe widths of elements in a Row can be controlled via a combination of `scale` and `min_width` arguments that are present in every Component.\n\n- `scale` is an integer that defines how an element will take up space in a Row. If scale is set to `0`, and element will not expand to take up space. If scale is set to `1` or greater, the element well expand. Multiple elements in a row will expand proportional to their scale. 
Below, `btn1` will expand twice as much as `btn2`, while `btn0` will not expand at all:\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row():\n btn0 = gr.Button(\"Button 0\", scale=0)\n btn1 = gr.Button(\"Button 1\", scale=1)\n btn2 = gr.Button(\"Button 2\", scale=2)\n```\n\n- `min_width` will set the minimum width the element will take. The Row will wrap if there isn't sufficient space to satisfy all `min_width` values.\n\nLearn more about Rows in the [docs](https://gradio.app/docs/#row).\n\n## Columns and Nesting\n\nComponents within a Column will be placed vertically atop each other. Since the vertical layout is the default layout for Blocks apps anyway, to be useful, Columns are usually nested within Rows. For example:\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n with gr.Row():\n text1 = gr.Textbox(label=\"t1\")\n slider2 = gr.Textbox(label=\"s2\")\n drop3 = gr.Dropdown([\"a\", \"b\", \"c\"], label=\"d3\")\n with gr.Row():\n with gr.Column(scale=1, min_width=600):\n text1 = gr.Textbox(label=\"prompt 1\")\n text2 = gr.Textbox(label=\"prompt 2\")\n inbtw = gr.Button(\"Between\")\n text4 = gr.Textbox(label=\"prompt 1\")\n text5 = gr.Textbox(label=\"prompt 2\")\n with gr.Column(scale=2, min_width=600):\n img1 = gr.Image(\"images/cheetah.jpg\")\n btn = gr.Button(\"Go\").style(full_width=True)\n\ndemo.launch()\n```\n\n\nSee how the first column has two Textboxes arranged vertically. The second column has an Image and Button arranged vertically. Notice how the relative widths of the two columns is set by the `scale` parameter. The column with twice the `scale` value takes up twice the width.\n\nLearn more about Columns in the [docs](https://gradio.app/docs/#column).\n\n## Tabs and Accordions\n\nYou can also create Tabs using the `with gr.Tab('tab_name'):` clause. Any component created inside of a `with gr.Tab('tab_name'):` context appears in that tab. Consecutive Tab clauses are grouped together so that a single tab can be selected at one time, and only the components within that Tab's context are shown.\n\nFor example:\n\n```python\nimport numpy as np\nimport gradio as gr\n\n\ndef flip_text(x):\n return x[::-1]\n\n\ndef flip_image(x):\n return np.fliplr(x)\n\n\nwith gr.Blocks() as demo:\n gr.Markdown(\"Flip text or image files using this demo.\")\n with gr.Tab(\"Flip Text\"):\n text_input = gr.Textbox()\n text_output = gr.Textbox()\n text_button = gr.Button(\"Flip\")\n with gr.Tab(\"Flip Image\"):\n with gr.Row():\n image_input = gr.Image()\n image_output = gr.Image()\n image_button = gr.Button(\"Flip\")\n\n with gr.Accordion(\"Open for More!\"):\n gr.Markdown(\"Look at me...\")\n\n text_button.click(flip_text, inputs=text_input, outputs=text_output)\n image_button.click(flip_image, inputs=image_input, outputs=image_output)\n\ndemo.launch()\n\n```\n\n\nAlso note the `gr.Accordion('label')` in this example. The Accordion is a layout that can be toggled open or closed. Like `Tabs`, it is a layout element that can selectively hide or show content. Any components that are defined inside of a `with gr.Accordion('label'):` will be hidden or shown when the accordion's toggle icon is clicked.\n\nLearn more about [Tabs](https://gradio.app/docs/#tab) and [Accordions](https://gradio.app/docs/#accordion) in the docs.\n\n## Visibility\n\nBoth Components and Layout elements have a `visible` argument that can set initially and also updated using `gr.update()`. 
Setting `gr.update(visible=...)` on a Column can be used to show or hide a set of Components.\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n error_box = gr.Textbox(label=\"Error\", visible=False)\n\n name_box = gr.Textbox(label=\"Name\")\n age_box = gr.Number(label=\"Age\", minimum=0, maximum=100)\n symptoms_box = gr.CheckboxGroup([\"Cough\", \"Fever\", \"Runny Nose\"])\n submit_btn = gr.Button(\"Submit\")\n\n with gr.Column(visible=False) as output_col:\n diagnosis_box = gr.Textbox(label=\"Diagnosis\")\n patient_summary_box = gr.Textbox(label=\"Patient Summary\")\n\n def submit(name, age, symptoms):\n if len(name) == 0:\n return {error_box: gr.update(value=\"Enter name\", visible=True)}\n return {\n output_col: gr.update(visible=True),\n diagnosis_box: \"covid\" if \"Cough\" in symptoms else \"flu\",\n patient_summary_box: f\"{name}, {age} y/o\",\n }\n\n submit_btn.click(\n submit,\n [name_box, age_box, symptoms_box],\n [error_box, diagnosis_box, patient_summary_box, output_col],\n )\n\ndemo.launch()\n\n```\n\n\n## Variable Number of Outputs\n\nBy adjusting the visibility of components in a dynamic way, it is possible to create\ndemos with Gradio that support a *variable numbers of outputs*. Here's a very simple example\nwhere the number of output textboxes is controlled by an input slider:\n\n```python\nimport gradio as gr\n\nmax_textboxes = 10\n\ndef variable_outputs(k):\n k = int(k)\n return [gr.Textbox.update(visible=True)]*k + [gr.Textbox.update(visible=False)]*(max_textboxes-k)\n\nwith gr.Blocks() as demo:\n s = gr.Slider(1, max_textboxes, value=max_textboxes, step=1, label=\"How many textboxes to show:\")\n textboxes = []\n for i in range(max_textboxes):\n t = gr.Textbox(f\"Textbox {i}\")\n textboxes.append(t)\n\n s.change(variable_outputs, s, textboxes)\n\nif __name__ == \"__main__\":\n demo.launch()\n\n```\n\n\n## Defining and Rendering Components Separately\n\nIn some cases, you might want to define components before you actually render them in your UI. For instance, you might want to show an examples section using `gr.Examples` above the corresponding `gr.Textbox` input. Since `gr.Examples` requires as a parameter the input component object, you will need to first define the input component, but then render it later, after you have defined the `gr.Examples` object.\n\nThe solution to this is to define the `gr.Textbox` outside of the `gr.Blocks()` scope and use the component's `.render()` method wherever you'd like it placed in the UI.\n\nHere's a full code example:\n\n```python\ninput_textbox = gr.Textbox()\n\nwith gr.Blocks() as demo:\n gr.Examples([\"hello\", \"bonjour\", \"merhaba\"], input_textbox)\n input_textbox.render()\n```\n", "html": "

Controlling Layout

\n\n

By default, Components in Blocks are arranged vertically. Let's take a look at how we can rearrange Components. Under the hood, this layout structure uses the flexbox model of web development.

\n\n

Rows

\n\n

Elements within a with gr.Row clause will all be displayed horizontally. For example, to display two Buttons side by side:

\n\n
with gr.Blocks() as demo:\n    with gr.Row():\n        btn1 = gr.Button(\"Button 1\")\n        btn2 = gr.Button(\"Button 2\")\n
\n\n

To make every element in a Row have the same height, use the equal_height argument of the style method.

\n\n
with gr.Blocks() as demo:\n    with gr.Row().style(equal_height=True):\n        textbox = gr.Textbox()\n        btn2 = gr.Button(\"Button 2\")\n
\n\n

The widths of elements in a Row can be controlled via a combination of scale and min_width arguments that are present in every Component.

\n\n
    \n
  • scale is an integer that defines how an element will take up space in a Row. If scale is set to 0, the element will not expand to take up space. If scale is set to 1 or greater, the element will expand. Multiple elements in a row will expand in proportion to their scale. Below, btn1 will expand twice as much as btn2, while btn0 will not expand at all:
  • \n
\n\n
with gr.Blocks() as demo:\n    with gr.Row():\n        btn0 = gr.Button(\"Button 0\", scale=0)\n        btn1 = gr.Button(\"Button 1\", scale=1)\n        btn2 = gr.Button(\"Button 2\", scale=2)\n
\n\n
    \n
  • min_width will set the minimum width the element will take. The Row will wrap if there isn't sufficient space to satisfy all min_width values.
  • \n
\n\n

Learn more about Rows in the docs.

\n\n

Columns and Nesting

\n\n

Components within a Column will be placed vertically atop each other. Since the vertical layout is the default layout for Blocks apps anyway, to be useful, Columns are usually nested within Rows. For example:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        text1 = gr.Textbox(label=\"t1\")\n        slider2 = gr.Textbox(label=\"s2\")\n        drop3 = gr.Dropdown([\"a\", \"b\", \"c\"], label=\"d3\")\n    with gr.Row():\n        with gr.Column(scale=1, min_width=600):\n            text1 = gr.Textbox(label=\"prompt 1\")\n            text2 = gr.Textbox(label=\"prompt 2\")\n            inbtw = gr.Button(\"Between\")\n            text4 = gr.Textbox(label=\"prompt 1\")\n            text5 = gr.Textbox(label=\"prompt 2\")\n        with gr.Column(scale=2, min_width=600):\n            img1 = gr.Image(\"images/cheetah.jpg\")\n            btn = gr.Button(\"Go\").style(full_width=True)\n\ndemo.launch()\n
\n\n

\n\n

See how the first column has two Textboxes arranged vertically. The second column has an Image and Button arranged vertically. Notice how the relative widths of the two columns are set by the scale parameter. The column with twice the scale value takes up twice the width.

\n\n

Learn more about Columns in the docs.

\n\n

Tabs and Accordions

\n\n

You can also create Tabs using the with gr.Tab('tab_name'): clause. Any component created inside of a with gr.Tab('tab_name'): context appears in that tab. Consecutive Tab clauses are grouped together so that a single tab can be selected at one time, and only the components within that Tab's context are shown.

\n\n

For example:

\n\n
import numpy as np\nimport gradio as gr\n\n\ndef flip_text(x):\n    return x[::-1]\n\n\ndef flip_image(x):\n    return np.fliplr(x)\n\n\nwith gr.Blocks() as demo:\n    gr.Markdown(\"Flip text or image files using this demo.\")\n    with gr.Tab(\"Flip Text\"):\n        text_input = gr.Textbox()\n        text_output = gr.Textbox()\n        text_button = gr.Button(\"Flip\")\n    with gr.Tab(\"Flip Image\"):\n        with gr.Row():\n            image_input = gr.Image()\n            image_output = gr.Image()\n        image_button = gr.Button(\"Flip\")\n\n    with gr.Accordion(\"Open for More!\"):\n        gr.Markdown(\"Look at me...\")\n\n    text_button.click(flip_text, inputs=text_input, outputs=text_output)\n    image_button.click(flip_image, inputs=image_input, outputs=image_output)\n\ndemo.launch()\n\n
\n\n

\n\n

Also note the gr.Accordion('label') in this example. The Accordion is a layout that can be toggled open or closed. Like Tabs, it is a layout element that can selectively hide or show content. Any components that are defined inside of a with gr.Accordion('label'): will be hidden or shown when the accordion's toggle icon is clicked.

\n\n

Learn more about Tabs and Accordions in the docs.

\n\n

Visibility

\n\n

Both Components and Layout elements have a visible argument that can be set initially and also updated using gr.update(). Setting gr.update(visible=...) on a Column can be used to show or hide a set of Components.

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    error_box = gr.Textbox(label=\"Error\", visible=False)\n\n    name_box = gr.Textbox(label=\"Name\")\n    age_box = gr.Number(label=\"Age\", minimum=0, maximum=100)\n    symptoms_box = gr.CheckboxGroup([\"Cough\", \"Fever\", \"Runny Nose\"])\n    submit_btn = gr.Button(\"Submit\")\n\n    with gr.Column(visible=False) as output_col:\n        diagnosis_box = gr.Textbox(label=\"Diagnosis\")\n        patient_summary_box = gr.Textbox(label=\"Patient Summary\")\n\n    def submit(name, age, symptoms):\n        if len(name) == 0:\n            return {error_box: gr.update(value=\"Enter name\", visible=True)}\n        return {\n            output_col: gr.update(visible=True),\n            diagnosis_box: \"covid\" if \"Cough\" in symptoms else \"flu\",\n            patient_summary_box: f\"{name}, {age} y/o\",\n        }\n\n    submit_btn.click(\n        submit,\n        [name_box, age_box, symptoms_box],\n        [error_box, diagnosis_box, patient_summary_box, output_col],\n    )\n\ndemo.launch()\n\n
\n\n

\n\n

Variable Number of Outputs

\n\n

By adjusting the visibility of components in a dynamic way, it is possible to create\ndemos with Gradio that support a variable number of outputs. Here's a very simple example\nwhere the number of output textboxes is controlled by an input slider:

\n\n
import gradio as gr\n\nmax_textboxes = 10\n\ndef variable_outputs(k):\n    k = int(k)\n    return [gr.Textbox.update(visible=True)]*k + [gr.Textbox.update(visible=False)]*(max_textboxes-k)\n\nwith gr.Blocks() as demo:\n    s = gr.Slider(1, max_textboxes, value=max_textboxes, step=1, label=\"How many textboxes to show:\")\n    textboxes = []\n    for i in range(max_textboxes):\n        t = gr.Textbox(f\"Textbox {i}\")\n        textboxes.append(t)\n\n    s.change(variable_outputs, s, textboxes)\n\nif __name__ == \"__main__\":\n   demo.launch()\n\n
\n\n

\n\n

Defining and Rendering Components Separately

\n\n

In some cases, you might want to define components before you actually render them in your UI. For instance, you might want to show an examples section using gr.Examples above the corresponding gr.Textbox input. Since gr.Examples requires the input component object as a parameter, you will need to define the input component first, and then render it later, after you have defined the gr.Examples object.

\n\n

The solution to this is to define the gr.Textbox outside of the gr.Blocks() scope and use the component's .render() method wherever you'd like it placed in the UI.

\n\n

Here's a full code example:

\n\n
input_textbox = gr.Textbox()\n\nwith gr.Blocks() as demo:\n    gr.Examples([\"hello\", \"bonjour\", \"merhaba\"], input_textbox)\n    input_textbox.render()\n
\n", "tags": [], "spaces": [], "url": "/guides/controlling-layout/", "contributor": null}], "override_signature": "with gradio.Tab():", "parent": "gradio"}, "group": {"class": null, "name": "Group", "description": "Group is a layout element within Blocks which groups together children so that they do not have any padding or margin between them.", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "visible", "annotation": "bool", "doc": "If False, group will be hidden.", "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}], "returns": {"annotation": null}, "example": "with gr.Group():\n gr.Textbox(label=\"First\")\n gr.Textbox(label=\"Last\")", "fns": [], "override_signature": "with gradio.Group():", "parent": "gradio"}, "box": {"class": null, "name": "Box", "description": "Box is a a layout element which places children in a box with rounded corners and some padding around them.", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "visible", "annotation": "bool", "doc": "If False, box will be hidden.", "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}], "returns": {"annotation": null}, "example": "with gr.Box():\n gr.Textbox(label=\"First\")\n gr.Textbox(label=\"Last\")", "fns": [], "override_signature": "with gradio.Box():", "parent": "gradio"}, "accordion": {"class": null, "name": "Accordion", "description": "Accordion is a layout element which can be toggled to show/hide the contained content.", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "label", "annotation": "", "doc": "name of accordion section."}, {"name": "open", "annotation": "bool", "doc": "if True, accordion is open by default.", "default": "True"}, {"name": "visible", "annotation": "bool", "doc": null, "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}], "returns": {"annotation": null}, "example": "with gr.Accordion(\"See Details\"):\n gr.Markdown(\"lorem ipsum\")", "fns": [], "parent": "gradio"}, "chatinterface": {"class": null, "name": "ChatInterface", "description": "ChatInterface is Gradio's high-level abstraction for creating chatbot UIs, and allows you to create a web-based demo around a chatbot model in a few lines of code. Only one parameter is required: fn, which takes a function that governs the response of the chatbot based on the user input and chat history. Additional parameters can be used to control the appearance and behavior of the demo.
", "tags": {"demos": "chatinterface_random_response, chatinterface_streaming_echo", "guides": "creating-a-chatbot-fast, sharing-your-app"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "fn", "annotation": "Callable", "doc": "the function to wrap the chat interface around. Should accept two parameters: a string input message and list of two-element lists of the form [[user_message, bot_message], ...] representing the chat history, and return a string response. See the Chatbot documentation for more information on the chat history format."}, {"name": "chatbot", "annotation": "Chatbot | None", "doc": "an instance of the gr.Chatbot component to use for the chat interface, if you would like to customize the chatbot properties. If not provided, a default gr.Chatbot component will be created.", "default": "None"}, {"name": "textbox", "annotation": "Textbox | None", "doc": "an instance of the gr.Textbox component to use for the chat interface, if you would like to customize the textbox properties. If not provided, a default gr.Textbox component will be created.", "default": "None"}, {"name": "additional_inputs", "annotation": "str | IOComponent | list[str | IOComponent] | None", "doc": "an instance or list of instances of gradio components (or their string shortcuts) to use as additional inputs to the chatbot. If components are not already rendered in a surrounding Blocks, then the components will be displayed under the chatbot, in an accordion.", "default": "None"}, {"name": "additional_inputs_accordion_name", "annotation": "str", "doc": "the label of the accordion to use for additional inputs, only used if additional_inputs is provided.", "default": "\"Additional Inputs\""}, {"name": "examples", "annotation": "list[str] | None", "doc": "sample inputs for the function; if provided, appear below the chatbot and can be clicked to populate the chatbot input.", "default": "None"}, {"name": "cache_examples", "annotation": "bool | None", "doc": "If True, caches examples in the server for fast runtime in examples. The default option in HuggingFace Spaces is True. The default option elsewhere is False.", "default": "None"}, {"name": "title", "annotation": "str | None", "doc": "a title for the interface; if provided, appears above chatbot in large font. Also used as the tab title when opened in a browser window.", "default": "None"}, {"name": "description", "annotation": "str | None", "doc": "a description for the interface; if provided, appears above the chatbot and beneath the title in regular font. Accepts Markdown and HTML content.", "default": "None"}, {"name": "theme", "annotation": "Theme | str | None", "doc": "Theme to use, loaded from gradio.themes.", "default": "None"}, {"name": "css", "annotation": "str | None", "doc": "custom css or path to custom css file to use with interface.", "default": "None"}, {"name": "analytics_enabled", "annotation": "bool | None", "doc": "Whether to allow basic telemetry. If None, will use GRADIO_ANALYTICS_ENABLED environment variable if defined, or default to True.", "default": "None"}, {"name": "submit_btn", "annotation": "str | None | Button", "doc": "Text to display on the submit button. If None, no button will be displayed. If a Button object, that button will be used.", "default": "\"Submit\""}, {"name": "stop_btn", "annotation": "str | None | Button", "doc": "Text to display on the stop button, which replaces the submit_btn when the submit_btn or retry_btn is clicked and response is streaming. 
Clicking on the stop_btn will halt the chatbot response. If set to None, stop button functionality does not appear in the chatbot. If a Button object, that button will be used as the stop button.", "default": "\"Stop\""}, {"name": "retry_btn", "annotation": "str | None | Button", "doc": "Text to display on the retry button. If None, no button will be displayed. If a Button object, that button will be used.", "default": "\"\ud83d\udd04 Retry\""}, {"name": "undo_btn", "annotation": "str | None | Button", "doc": "Text to display on the delete last button. If None, no button will be displayed. If a Button object, that button will be used.", "default": "\"\u21a9\ufe0f Undo\""}, {"name": "clear_btn", "annotation": "str | None | Button", "doc": "Text to display on the clear button. If None, no button will be displayed. If a Button object, that button will be used.", "default": "\"\ud83d\uddd1\ufe0f Clear\""}, {"name": "autofocus", "annotation": "bool", "doc": "If True, autofocuses to the textbox when the page loads.", "default": "True"}], "returns": {"annotation": null}, "example": "import gradio as gr\n\ndef echo(message, history):\n return message\n\ndemo = gr.ChatInterface(fn=echo, examples=[\"hello\", \"hola\", \"merhaba\"], title=\"Echo Bot\")\ndemo.launch()", "fns": [], "demos": [["chatinterface_random_response", "import random\nimport gradio as gr\n\ndef random_response(message, history):\n return random.choice([\"Yes\", \"No\"])\n\ndemo = gr.ChatInterface(random_response)\n\nif __name__ == \"__main__\":\n demo.launch()\n"], ["chatinterface_streaming_echo", "import time\nimport gradio as gr\n\ndef slow_echo(message, history):\n for i in range(len(message)):\n time.sleep(0.05)\n yield \"You typed: \" + message[: i+1]\n\ndemo = gr.ChatInterface(slow_echo).queue()\n\nif __name__ == \"__main__\":\n demo.launch()\n"]], "guides": [{"name": "creating-a-chatbot-fast", "category": "chatbots", "pretty_category": "Chatbots", "guide_index": 1, "absolute_index": 13, "pretty_name": "Creating A Chatbot Fast", "content": "# How to Create a Chatbot with Gradio\n\n\n\n## Introduction\n\nChatbots are a popular application of large language models. Using `gradio`, you can easily build a demo of your chatbot model and share that with your users, or try it yourself using an intuitive chatbot UI.\n\nThis tutorial uses `gr.ChatInterface()`, which is a high-level abstraction that allows you to create your chatbot UI fast, often with a single line of code. The chatbot interface that we create will look something like this:\n\n\n\nWe'll start with a couple of simple examples, and then show how to use `gr.ChatInterface()` with real language models from several popular APIs and libraries, including `langchain`, `openai`, and Hugging Face. \n\n**Prerequisites**: please make sure you are using the **latest version** version of Gradio: \n\n```bash\n$ pip install --upgrade gradio\n```\n\n## Defining a chat function\n\nWhen working with `gr.ChatInterface()`, the first thing you should do is define your chat function. Your chat function should take two arguments: `message` and then `history` (the arguments can be named anything, but must be in this order).\n\n* `message`: a `str` representing the user's input.\n* `history`: a `list` of `list` representing the conversations up until that point. Each inner list consists of two `str` representing a pair: `[user input, bot response]`. \n\nYour function should return a single string response, which is the bot's response to the particular user input `message`. 
Your function can take into account the `history` of messages, as well as the current message.\n\nLet's take a look at a few examples.\n\n## Example: a chatbot that responds yes or no\n\nLet's write a chat function that responds `Yes` or `No` randomly.\n\nHere's our chat function:\n\n```python\nimport random\n\ndef random_response(message, history):\n return random.choice([\"Yes\", \"No\"])\n```\n\nNow, we can plug this into `gr.ChatInterface()` and call the `.launch()` method to create the web interface:\n\n```python\nimport gradio as gr\n\ngr.ChatInterface(random_response).launch()\n```\n\nThat's it! Here's our running demo, try it out:\n\n\n\n## Another example using the user's input and history\n\nOf course, the previous example was very simplistic, it didn't even take user input or the previous history into account! Here's another simple example showing how to incorporate a user's input as well as the history.\n\n```python\nimport random\nimport gradio as gr\n\ndef alternatingly_agree(message, history):\n if len(history) % 2 == 0:\n return f\"Yes, I do think that '{message}'\"\n else:\n return \"I don't think so\"\n\ngr.ChatInterface(alternatingly_agree).launch()\n```\n\n## Streaming chatbots \n\nIf in your chat function, you use `yield` to generate a sequence of responses, you'll end up with a streaming chatbot. It's that simple!\n\n```python\nimport time\nimport gradio as gr\n\ndef slow_echo(message, history):\n for i in range(len(message)):\n time.sleep(0.3)\n yield \"You typed: \" + message[: i+1]\n\ngr.ChatInterface(slow_echo).queue().launch()\n```\n\nNotice that we've [enabled queuing](/guides/key-features#queuing), which is required to use generator functions. While the response is streaming, the \"Submit\" button turns into a \"Stop\" button that can be used to stop the generator function. You can customize the appearance and behavior of the \"Stop\" button using the `stop_btn` parameter.\n\n## Customizing your chatbot\n\nIf you're familiar with Gradio's `Interface` class, the `gr.ChatInterface` includes many of the same arguments that you can use to customize the look and feel of your Chatbot. For example, you can:\n\n* add a title and description above your chatbot using `title` and `description` arguments.\n* add a theme or custom css using `theme` and `css` arguments respectively.\n* add `examples` and even enable `cache_examples`, which make it easier for users to try it out .\n* You can change the text or disable each of the buttons that appear in the chatbot interface: `submit_btn`, `retry_btn`, `undo_btn`, `clear_btn`.\n\nIf you want to customize the `gr.Chatbot` or `gr.Textbox` that compose the `ChatInterface`, then you can pass in your own chatbot or textbox as well. Here's an example of how we can use these parameters:\n\n\n```python\nimport gradio as gr\n\ndef yes_man(message, history):\n if message.endswith(\"?\"):\n return \"Yes\"\n else:\n return \"Ask me anything!\"\n\ngr.ChatInterface(\n yes_man,\n chatbot=gr.Chatbot(height=300),\n textbox=gr.Textbox(placeholder=\"Ask me a yes or no question\", container=False, scale=7),\n title=\"Yes Man\",\n description=\"Ask Yes Man any question\",\n theme=\"soft\",\n examples=[\"Hello\", \"Am I cool?\", \"Are tomatoes vegetables?\"],\n cache_examples=True,\n retry_btn=None,\n undo_btn=\"Delete Previous\",\n clear_btn=\"Clear\",\n).launch()\n```\n\n## Additional Inputs\n\nYou may want to add additional parameters to your chatbot and expose them to your users through the Chatbot UI. 
For example, suppose you want to add a textbox for a system prompt, or a slider that sets the number of tokens in the chatbot's response. The `ChatInterface` class supports an `additional_inputs` parameter which can be used to add additional input components.\n\nThe `additional_inputs` parameters accepts a component or a list of components. You can pass the component instances directly, or use their string shortcuts (e.g. `\"textbox\"` instead of `gr.Textbox()`). If you pass in component instances, and they have *not* already been rendered, then the components will appear underneath the chatbot (and any examples) within a `gr.Accordion()`. You can set the label of this accordion using the `additional_inputs_accordion_name` parameter. \n\nHere's a complete example:\n\n```python\nimport gradio as gr\nimport time\n\ndef echo(message, history, system_prompt, tokens):\n response = f\"System prompt: {system_prompt}\\n Message: {message}.\"\n for i in range(min(len(response), int(tokens))):\n time.sleep(0.05)\n yield response[: i+1]\n\ndemo = gr.ChatInterface(echo, \n additional_inputs=[\n gr.Textbox(\"You are helpful AI.\", label=\"System Prompt\"), \n gr.Slider(10, 100)\n ]\n )\n\nif __name__ == \"__main__\":\n demo.queue().launch()\n```\n\nIf the components you pass into the `additional_inputs` have already been rendered in a parent `gr.Blocks()`, then they will *not* be re-rendered in the accordion. This provides flexibility in deciding where to lay out the input components. In the example below, we position the `gr.Textbox()` on top of the Chatbot UI, while keeping the slider underneath.\n\n```python\nimport gradio as gr\nimport time\n\ndef echo(message, history, system_prompt, tokens):\n response = f\"System prompt: {system_prompt}\\n Message: {message}.\"\n for i in range(min(len(response), int(tokens))):\n time.sleep(0.05)\n yield response[: i+1]\n\nwith gr.Blocks() as demo:\n system_prompt = gr.Textbox(\"You are helpful AI.\", label=\"System Prompt\")\n slider = gr.Slider(10, 100, render=False)\n \n gr.ChatInterface(\n echo, additional_inputs=[system_prompt, slider]\n )\n\ndemo.queue().launch()\n```\n\nIf you need to create something even more custom, then its best to construct the chatbot UI using the low-level `gr.Blocks()` API. We have [a dedicated guide for that here](/guides/creating-a-custom-chatbot-with-blocks).\n\n## Using your chatbot via an API\n\nOnce you've built your Gradio chatbot and are hosting it on [Hugging Face Spaces](https://hf.space) or somewhere else, then you can query it with a simple API at the `/chat` endpoint. The endpoint just expects the user's message (and potentially additional inputs if you have set any using the `additional_inputs` parameter), and will return the response, internally keeping track of the messages sent so far.\n\n[](https://github.com/gradio-app/gradio/assets/1778297/7b10d6db-6476-4e2e-bebd-ecda802c3b8f)\n\nTo use the endpoint, you should use either the [Gradio Python Client](/guides/getting-started-with-the-python-client) or the [Gradio JS client](/guides/getting-started-with-the-js-client).\n\n## A `langchain` example\n\nNow, let's actually use the `gr.ChatInterface` with some real large language models. We'll start by using `langchain` on top of `openai` to build a general-purpose streaming chatbot application in 19 lines of code. 
You'll need to have an OpenAI key for this example (keep reading for the free, open-source equivalent!)\n\n```python\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.schema import AIMessage, HumanMessage\nimport os\nimport openai\nimport gradio as gr\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-...\" # Replace with your key\n\nllm = ChatOpenAI(temperature=1.0, model='gpt-3.5-turbo-0613')\n\ndef predict(message, history):\n    history_langchain_format = []\n    for human, ai in history:\n        history_langchain_format.append(HumanMessage(content=human))\n        history_langchain_format.append(AIMessage(content=ai))\n    history_langchain_format.append(HumanMessage(content=message))\n    gpt_response = llm(history_langchain_format)\n    return gpt_response.content\n\ngr.ChatInterface(predict).launch() \n```\n\n## A streaming example using `openai`\n\nOf course, we could also use the `openai` library directly. Here's a similar example, but this time with streaming results as well:\n\n\n```python\nimport openai\nimport gradio as gr\n\nopenai.api_key = \"sk-...\" # Replace with your key\n\ndef predict(message, history):\n    history_openai_format = []\n    for human, assistant in history:\n        history_openai_format.append({\"role\": \"user\", \"content\": human })\n        history_openai_format.append({\"role\": \"assistant\", \"content\":assistant})\n    history_openai_format.append({\"role\": \"user\", \"content\": message})\n\n    response = openai.ChatCompletion.create(\n        model='gpt-3.5-turbo',\n        messages= history_openai_format, \n        temperature=1.0,\n        stream=True\n    )\n\n    partial_message = \"\"\n    for chunk in response:\n        if len(chunk['choices'][0]['delta']) != 0:\n            partial_message = partial_message + chunk['choices'][0]['delta']['content']\n            yield partial_message \n\ngr.ChatInterface(predict).queue().launch() \n```\n\n## Example using a local, open-source LLM with Hugging Face\n\nOf course, in many cases you want to run a chatbot locally. 
Here's the equivalent example using Together's RedePajama model, from Hugging Face (this requires you to have a GPU with CUDA).\n\n```python\nimport gradio as gr\nimport torch\nfrom transformers import AutoModelForCausalLM, AutoTokenizer, StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer\nfrom threading import Thread\n\ntokenizer = AutoTokenizer.from_pretrained(\"togethercomputer/RedPajama-INCITE-Chat-3B-v1\")\nmodel = AutoModelForCausalLM.from_pretrained(\"togethercomputer/RedPajama-INCITE-Chat-3B-v1\", torch_dtype=torch.float16)\nmodel = model.to('cuda:0')\n\nclass StopOnTokens(StoppingCriteria):\n def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:\n stop_ids = [29, 0]\n for stop_id in stop_ids:\n if input_ids[0][-1] == stop_id:\n return True\n return False\n\ndef predict(message, history): \n\n history_transformer_format = history + [[message, \"\"]]\n stop = StopOnTokens()\n\n messages = \"\".join([\"\".join([\"\\n:\"+item[0], \"\\n:\"+item[1]]) #curr_system_message + \n for item in history_transformer_format])\n \n model_inputs = tokenizer([messages], return_tensors=\"pt\").to(\"cuda\")\n streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)\n generate_kwargs = dict(\n model_inputs,\n streamer=streamer,\n max_new_tokens=1024,\n do_sample=True,\n top_p=0.95,\n top_k=1000,\n temperature=1.0,\n num_beams=1,\n stopping_criteria=StoppingCriteriaList([stop])\n )\n t = Thread(target=model.generate, kwargs=generate_kwargs)\n t.start()\n\n partial_message = \"\"\n for new_token in streamer:\n if new_token != '<':\n partial_message += new_token\n yield partial_message \n \n\ngr.ChatInterface(predict).queue().launch()\n```\n\nWith those examples, you should be all set to create your own Gradio Chatbot demos soon! For building even more custom Chatbot applications, check out [a dedicated guide](/guides/creating-a-custom-chatbot-with-blocks) using the low-level `gr.Blocks()` API.", "html": "

How to Create a Chatbot with Gradio

\n\n

Introduction

\n\n

Chatbots are a popular application of large language models. Using gradio, you can easily build a demo of your chatbot model and share that with your users, or try it yourself using an intuitive chatbot UI.

\n\n

This tutorial uses gr.ChatInterface(), which is a high-level abstraction that allows you to create your chatbot UI fast, often with a single line of code. The chatbot interface that we create will look something like this:

\n\n

\n\n

We'll start with a couple of simple examples, and then show how to use gr.ChatInterface() with real language models from several popular APIs and libraries, including langchain, openai, and Hugging Face.

\n\n

Prerequisites: please make sure you are using the latest version of Gradio:

\n\n
$ pip install --upgrade gradio\n
\n\n

Defining a chat function

\n\n

When working with gr.ChatInterface(), the first thing you should do is define your chat function. Your chat function should take two arguments: message and then history (the arguments can be named anything, but must be in this order).

\n\n
    \n
  • message: a str representing the user's input.
  • \n
  • history: a list of lists representing the conversation up until that point. Each inner list consists of two str representing a pair: [user input, bot response].
  • \n
\n\n

Your function should return a single string response, which is the bot's response to the particular user input message. Your function can take into account the history of messages, as well as the current message.

\n\n
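For instance (an illustrative sketch, not output from a real session), on the third user turn the two arguments might look like this:
\n\n
message = \"Are tomatoes vegetables?\"\nhistory = [[\"Hi!\", \"Hello, how are you?\"], [\"Do you like tomatoes?\", \"Yes, I do.\"]]  # two earlier [user input, bot response] pairs\n
\n\n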

Let's take a look at a few examples.

\n\n

Example: a chatbot that responds yes or no

\n\n

Let's write a chat function that responds Yes or No randomly.

\n\n

Here's our chat function:

\n\n
import random\n\ndef random_response(message, history):\n    return random.choice([\"Yes\", \"No\"])\n
\n\n

Now, we can plug this into gr.ChatInterface() and call the .launch() method to create the web interface:

\n\n
import gradio as gr\n\ngr.ChatInterface(random_response).launch()\n
\n\n

That's it! Here's our running demo, try it out:

\n\n

\n\n

Another example using the user's input and history

\n\n

Of course, the previous example was very simplistic; it didn't even take user input or the previous history into account! Here's another simple example showing how to incorporate a user's input as well as the history.

\n\n
import random\nimport gradio as gr\n\ndef alternatingly_agree(message, history):\n    if len(history) % 2 == 0:\n        return f\"Yes, I do think that '{message}'\"\n    else:\n        return \"I don't think so\"\n\ngr.ChatInterface(alternatingly_agree).launch()\n
\n\n

Streaming chatbots

\n\n

If in your chat function, you use yield to generate a sequence of responses, you'll end up with a streaming chatbot. It's that simple!

\n\n
import time\nimport gradio as gr\n\ndef slow_echo(message, history):\n    for i in range(len(message)):\n        time.sleep(0.3)\n        yield \"You typed: \" + message[: i+1]\n\ngr.ChatInterface(slow_echo).queue().launch()\n
\n\n

Notice that we've enabled queuing, which is required to use generator functions. While the response is streaming, the \"Submit\" button turns into a \"Stop\" button that can be used to stop the generator function. You can customize the appearance and behavior of the \"Stop\" button using the stop_btn parameter.

\n\n
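As a small illustration (a sketch reusing the slow_echo function above, and assuming stop_btn accepts a label string like the other button parameters), you could relabel the button:
\n\n
import time\nimport gradio as gr\n\ndef slow_echo(message, history):\n    for i in range(len(message)):\n        time.sleep(0.3)\n        yield \"You typed: \" + message[: i+1]\n\n# Relabel the button shown while a response is streaming\ngr.ChatInterface(slow_echo, stop_btn=\"Stop generating\").queue().launch()\n
\n\n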

Customizing your chatbot

\n\n

If you're familiar with Gradio's Interface class, the gr.ChatInterface includes many of the same arguments that you can use to customize the look and feel of your Chatbot. For example, you can:

\n\n
    \n
  • add a title and description above your chatbot using title and description arguments.
  • \n
  • add a theme or custom css using theme and css arguments respectively.
  • \n
  • add examples and even enable cache_examples, which make it easier for users to try it out.
  • \n
  • You can change the text or disable each of the buttons that appear in the chatbot interface: submit_btn, retry_btn, undo_btn, clear_btn.
  • \n
\n\n

If you want to customize the gr.Chatbot or gr.Textbox that compose the ChatInterface, then you can pass in your own chatbot or textbox as well. Here's an example of how we can use these parameters:

\n\n
import gradio as gr\n\ndef yes_man(message, history):\n    if message.endswith(\"?\"):\n        return \"Yes\"\n    else:\n        return \"Ask me anything!\"\n\ngr.ChatInterface(\n    yes_man,\n    chatbot=gr.Chatbot(height=300),\n    textbox=gr.Textbox(placeholder=\"Ask me a yes or no question\", container=False, scale=7),\n    title=\"Yes Man\",\n    description=\"Ask Yes Man any question\",\n    theme=\"soft\",\n    examples=[\"Hello\", \"Am I cool?\", \"Are tomatoes vegetables?\"],\n    cache_examples=True,\n    retry_btn=None,\n    undo_btn=\"Delete Previous\",\n    clear_btn=\"Clear\",\n).launch()\n
\n\n

Additional Inputs

\n\n

You may want to add additional parameters to your chatbot and expose them to your users through the Chatbot UI. For example, suppose you want to add a textbox for a system prompt, or a slider that sets the number of tokens in the chatbot's response. The ChatInterface class supports an additional_inputs parameter which can be used to add additional input components.

\n\n

The additional_inputs parameter accepts a component or a list of components. You can pass the component instances directly, or use their string shortcuts (e.g. \"textbox\" instead of gr.Textbox()). If you pass in component instances and they have not already been rendered, then the components will appear underneath the chatbot (and any examples) within a gr.Accordion(). You can set the label of this accordion using the additional_inputs_accordion_name parameter.

\n\n

Here's a complete example:

\n\n
import gradio as gr\nimport time\n\ndef echo(message, history, system_prompt, tokens):\n    response = f\"System prompt: {system_prompt}\\n Message: {message}.\"\n    for i in range(min(len(response), int(tokens))):\n        time.sleep(0.05)\n        yield response[: i+1]\n\ndemo = gr.ChatInterface(echo, \n                        additional_inputs=[\n                            gr.Textbox(\"You are helpful AI.\", label=\"System Prompt\"), \n                            gr.Slider(10, 100)\n                        ]\n                       )\n\nif __name__ == \"__main__\":\n    demo.queue().launch()\n
\n\n

If the components you pass into the additional_inputs have already been rendered in a parent gr.Blocks(), then they will not be re-rendered in the accordion. This provides flexibility in deciding where to lay out the input components. In the example below, we position the gr.Textbox() on top of the Chatbot UI, while keeping the slider underneath.

\n\n
import gradio as gr\nimport time\n\ndef echo(message, history, system_prompt, tokens):\n    response = f\"System prompt: {system_prompt}\\n Message: {message}.\"\n    for i in range(min(len(response), int(tokens))):\n        time.sleep(0.05)\n        yield response[: i+1]\n\nwith gr.Blocks() as demo:\n    system_prompt = gr.Textbox(\"You are helpful AI.\", label=\"System Prompt\")\n    slider = gr.Slider(10, 100, render=False)\n\n    gr.ChatInterface(\n        echo, additional_inputs=[system_prompt, slider]\n    )\n\ndemo.queue().launch()\n
\n\n

If you need to create something even more custom, then it's best to construct the chatbot UI using the low-level gr.Blocks() API. We have a dedicated guide for that here.

\n\n

Using your chatbot via an API

\n\n

Once you've built your Gradio chatbot and are hosting it on Hugging Face Spaces or somewhere else, then you can query it with a simple API at the /chat endpoint. The endpoint just expects the user's message (and potentially additional inputs if you have set any using the additional_inputs parameter), and will return the response, internally keeping track of the messages sent so far.

\n\n

\n\n

To use the endpoint, you should use either the Gradio Python Client or the Gradio JS client.

\n\n
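For instance, a minimal sketch with the Python client might look like the following (the Space name is a placeholder for your own app):
\n\n
from gradio_client import Client\n\n# \"your-username/your-chatbot\" is a placeholder; point this at your own Space or server URL\nclient = Client(\"your-username/your-chatbot\")\nresponse = client.predict(\"Hello!\", api_name=\"/chat\")\nprint(response)\n
\n\n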

A langchain example

\n\n

Now, let's actually use the gr.ChatInterface with some real large language models. We'll start by using langchain on top of openai to build a general-purpose streaming chatbot application in 19 lines of code. You'll need to have an OpenAI key for this example (keep reading for the free, open-source equivalent!)

\n\n
from langchain.chat_models import ChatOpenAI\nfrom langchain.schema import AIMessage, HumanMessage\nimport os\nimport openai\nimport gradio as gr\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-...\"  # Replace with your key\n\nllm = ChatOpenAI(temperature=1.0, model='gpt-3.5-turbo-0613')\n\ndef predict(message, history):\n    history_langchain_format = []\n    for human, ai in history:\n        history_langchain_format.append(HumanMessage(content=human))\n        history_langchain_format.append(AIMessage(content=ai))\n    history_langchain_format.append(HumanMessage(content=message))\n    gpt_response = llm(history_langchain_format)\n    return gpt_response.content\n\ngr.ChatInterface(predict).launch() \n
\n\n

A streaming example using openai

\n\n

Of course, we could also use the openai library directly. Here's a similar example, but this time with streaming results as well:

\n\n
import openai\nimport gradio as gr\n\nopenai.api_key = \"sk-...\"  # Replace with your key\n\ndef predict(message, history):\n    history_openai_format = []\n    for human, assistant in history:\n        history_openai_format.append({\"role\": \"user\", \"content\": human })\n        history_openai_format.append({\"role\": \"assistant\", \"content\":assistant})\n    history_openai_format.append({\"role\": \"user\", \"content\": message})\n\n    response = openai.ChatCompletion.create(\n        model='gpt-3.5-turbo',\n        messages= history_openai_format,         \n        temperature=1.0,\n        stream=True\n    )\n\n    partial_message = \"\"\n    for chunk in response:\n        if len(chunk['choices'][0]['delta']) != 0:\n            partial_message = partial_message + chunk['choices'][0]['delta']['content']\n            yield partial_message \n\ngr.ChatInterface(predict).queue().launch() \n
\n\n

Example using a local, open-source LLM with Hugging Face

\n\n

Of course, in many cases you want to run a chatbot locally. Here's the equivalent example using Together's RedPajama model from Hugging Face (this requires you to have a GPU with CUDA).

\n\n
import gradio as gr\nimport torch\nfrom transformers import AutoModelForCausalLM, AutoTokenizer, StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer\nfrom threading import Thread\n\ntokenizer = AutoTokenizer.from_pretrained(\"togethercomputer/RedPajama-INCITE-Chat-3B-v1\")\nmodel = AutoModelForCausalLM.from_pretrained(\"togethercomputer/RedPajama-INCITE-Chat-3B-v1\", torch_dtype=torch.float16)\nmodel = model.to('cuda:0')\n\nclass StopOnTokens(StoppingCriteria):\n    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:\n        stop_ids = [29, 0]\n        for stop_id in stop_ids:\n            if input_ids[0][-1] == stop_id:\n                return True\n        return False\n\ndef predict(message, history): \n\n    history_transformer_format = history + [[message, \"\"]]\n    stop = StopOnTokens()\n\n    messages = \"\".join([\"\".join([\"\\n:\"+item[0], \"\\n:\"+item[1]])  #curr_system_message + \n                for item in history_transformer_format])\n\n    model_inputs = tokenizer([messages], return_tensors=\"pt\").to(\"cuda\")\n    streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)\n    generate_kwargs = dict(\n        model_inputs,\n        streamer=streamer,\n        max_new_tokens=1024,\n        do_sample=True,\n        top_p=0.95,\n        top_k=1000,\n        temperature=1.0,\n        num_beams=1,\n        stopping_criteria=StoppingCriteriaList([stop])\n        )\n    t = Thread(target=model.generate, kwargs=generate_kwargs)\n    t.start()\n\n    partial_message  = \"\"\n    for new_token in streamer:\n        if new_token != '<':\n            partial_message += new_token\n            yield partial_message \n\n\ngr.ChatInterface(predict).queue().launch()\n
\n\n

With those examples, you should be all set to create your own Gradio Chatbot demos soon! For building even more custom Chatbot applications, check out a dedicated guide using the low-level gr.Blocks() API.

\n", "tags": ["NLP", "TEXT", "CHAT"], "spaces": [], "url": "/guides/creating-a-chatbot-fast/", "contributor": null}, {"name": "sharing-your-app", "category": "getting-started", "pretty_category": "Getting Started", "guide_index": 3, "absolute_index": 2, "pretty_name": "Sharing Your App", "content": "# Sharing Your App\n\nHow to share your Gradio app: \n\n1. [Sharing demos with the share parameter](#sharing-demos)\n2. [Hosting on HF Spaces](#hosting-on-hf-spaces)\n3. [Embedding hosted spaces](#embedding-hosted-spaces)\n4. [Embedding with web components](#embedding-with-web-components)\n5. [Using the API page](#api-page)\n6. [Adding authentication to the page](#authentication)\n7. [Accessing Network Requests](#accessing-the-network-request-directly)\n8. [Mounting within FastAPI](#mounting-within-another-fast-api-app)\n9. [Security](#security-and-file-access)\n\n## Sharing Demos\n\nGradio demos can be easily shared publicly by setting `share=True` in the `launch()` method. Like this:\n\n```python\ndemo.launch(share=True)\n```\n\nThis generates a public, shareable link that you can send to anybody! When you send this link, the user on the other side can try out the model in their browser. Because the processing happens on your device (as long as your device stays on!), you don't have to worry about any packaging any dependencies. A share link usually looks something like this: **XXXXX.gradio.app**. Although the link is served through a Gradio URL, we are only a proxy for your local server, and do not store any data sent through your app.\n\nKeep in mind, however, that these links are publicly accessible, meaning that anyone can use your model for prediction! Therefore, make sure not to expose any sensitive information through the functions you write, or allow any critical changes to occur on your device. If you set `share=False` (the default, except in colab notebooks), only a local link is created, which can be shared by [port-forwarding](https://www.ssh.com/ssh/tunneling/example) with specific users. \n\n![sharing](https://github.com/gradio-app/gradio/blob/main/guides/assets/sharing.svg?raw=true)\n\nShare links expire after 72 hours.\n\n## Hosting on HF Spaces\n\nIf you'd like to have a permanent link to your Gradio demo on the internet, use Hugging Face Spaces. [Hugging Face Spaces](http://huggingface.co/spaces/) provides the infrastructure to permanently host your machine learning model for free! \n\nAfter you have [created a free Hugging Face account](https://huggingface.co/join), you have three methods to deploy your Gradio app to Hugging Face Spaces:\n\n1. From terminal: run `gradio deploy` in your app directory. The CLI will gather some basic metadata and then launch your app. To update your space, you can re-run this command or enable the Github Actions option to automatically update the Spaces on `git push`.\n\n2. From your browser: Drag and drop a folder containing your Gradio model and all related files [here](https://huggingface.co/new-space).\n\n3. Connect Spaces with your Git repository and Spaces will pull the Gradio app from there. See [this guide how to host on Hugging Face Spaces](https://huggingface.co/blog/gradio-spaces) for more information. \n\n\n\nNote: Some components, like `gr.Image`, will display a \"Share\" button only on Spaces, so that users can share the generated output to the Discussions page of the Space easily. You can disable this with `show_share_button`, such as `gr.Image(show_share_button=False)`. 
\n\n![Image with show_share_button=True](https://github.com/gradio-app/gradio/blob/main/guides/assets/share_icon.png?raw=true)\n\n## Embedding Hosted Spaces\n\nOnce you have hosted your app on Hugging Face Spaces (or on your own server), you may want to embed the demo on a different website, such as your blog or your portfolio. Embedding an interactive demo allows people to try out the machine learning model that you have built, without needing to download or install anything \u2014 right in their browser! The best part is that you can embed interactive demos even in static websites, such as GitHub pages.\n\nThere are two ways to embed your Gradio demos. You can find quick links to both options directly on the Hugging Face Space page, in the \"Embed this Space\" dropdown option:\n\n![Embed this Space dropdown option](https://github.com/gradio-app/gradio/blob/main/guides/assets/embed_this_space.png?raw=true)\n\n### Embedding with Web Components\n\nWeb components typically offer a better experience to users than IFrames. Web components load lazily, meaning that they won't slow down the loading time of your website, and they automatically adjust their height based on the size of the Gradio app. \n\nTo embed with Web Components:\n\n1. Import the gradio JS library into into your site by adding the script below in your site (replace {GRADIO_VERSION} in the URL with the library version of Gradio you are using). \n\n```html\n\n```\n\n2. Add \n```html\n\n```\n\nelement where you want to place the app. Set the `src=` attribute to your Space's embed URL, which you can find in the \"Embed this Space\" button. For example:\n\n\n```html\n\n```\n\n\n\nYou can see examples of how web components look on the Gradio landing page.\n\nYou can also customize the appearance and behavior of your web component with attributes that you pass into the `` tag:\n\n* `src`: as we've seen, the `src` attributes links to the URL of the hosted Gradio demo that you would like to embed\n* `space`: an optional shorthand if your Gradio demo is hosted on Hugging Face Space. Accepts a `username/space_name` instead of a full URL. Example: `gradio/Echocardiogram-Segmentation`. If this attribute attribute is provided, then `src` does not need to be provided.\n* `control_page_title`: a boolean designating whether the html title of the page should be set to the title of the Gradio app (by default `\"false\"`)\n* `initial_height`: the initial height of the web component while it is loading the Gradio app, (by default `\"300px\"`). Note that the final height is set based on the size of the Gradio app.\n* `container`: whether to show the border frame and information about where the Space is hosted (by default `\"true\"`)\n* `info`: whether to show just the information about where the Space is hosted underneath the embedded app (by default `\"true\"`)\n* `autoscroll`: whether to autoscroll to the output when prediction has finished (by default `\"false\"`)\n* `eager`: whether to load the Gradio app as soon as the page loads (by default `\"false\"`)\n* `theme_mode`: whether to use the `dark`, `light`, or default `system` theme mode (by default `\"system\"`)\n\nHere's an example of how to use these attributes to create a Gradio app that does not lazy load and has an initial height of 0px. \n\n```html\n\n```\n\n_Note: While Gradio's CSS will never impact the embedding page, the embedding page can affect the style of the embedded Gradio app. 
Make sure that any CSS in the parent page isn't so general that it could also apply to the embedded Gradio app and cause the styling to break. Element selectors such as `header { ... }` and `footer { ... }` will be the most likely to cause issues._\n\n### Embedding with IFrames\n\nTo embed with IFrames instead (if you cannot add javascript to your website, for example), add this element:\n\n```html\n\n```\n\nAgain, you can find the `src=` attribute to your Space's embed URL, which you can find in the \"Embed this Space\" button.\n\nNote: if you use IFrames, you'll probably want to add a fixed `height` attribute and set `style=\"border:0;\"` to remove the boreder. In addition, if your app requires permissions such as access to the webcam or the microphone, you'll need to provide that as well using the `allow` attribute.\n\n## API Page\n\nYou can use almost any Gradio app as an API! In the footer of a Gradio app [like this one](https://huggingface.co/spaces/gradio/hello_world), you'll see a \"Use via API\" link. \n\n![Use via API](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/gradio-guides/api3.gif)\n\nThis is a page that lists the endpoints that can be used to query the Gradio app, via our supported clients: either [the Python client](https://gradio.app/guides/getting-started-with-the-python-client/), or [the JavaScript client](https://gradio.app/guides/getting-started-with-the-js-client/). For each endpoint, Gradio automatically generates the parameters and their types, as well as example inputs.\n\nThe endpoints are automatically created when you launch a Gradio `Interface`. If you are using Gradio `Blocks`, you can also set up a Gradio API page, though we recommend that you explicitly name each event listener, such as\n\n```python\nbtn.click(add, [num1, num2], output, api_name=\"addition\")\n```\n\nThis will add and document the endpoint `/api/addition/` to the automatically generated API page. Otherwise, your API endpoints will appear as \"unnamed\" endpoints. \n\n*Note*: For Gradio apps in which [queueing is enabled](https://gradio.app/guides/key-features#queuing), users can bypass the queue if they make a POST request to your API endpoint. To disable this behavior, set `api_open=False` in the `queue()` method. To disable the API page altogether, set `show_api=False` in `.launch()`.\n\n## Authentication\n\nYou may wish to put an authentication page in front of your app to limit who can open your app. With the `auth=` keyword argument in the `launch()` method, you can provide a tuple with a username and password, or a list of acceptable username/password tuples; Here's an example that provides password-based authentication for a single user named \"admin\":\n\n```python\ndemo.launch(auth=(\"admin\", \"pass1234\"))\n```\n\nFor more complex authentication handling, you can even pass a function that takes a username and password as arguments, and returns True to allow authentication, False otherwise. 
This can be used for, among other things, making requests to 3rd-party authentication services.\n\nHere's an example of a function that accepts any login where the username and password are the same:\n\n```python\ndef same_auth(username, password):\n return username == password\ndemo.launch(auth=same_auth)\n```\n\nFor authentication to work properly, third party cookies must be enabled in your browser.\nThis is not the case by default for Safari, Chrome Incognito Mode.\n\n## Accessing the Network Request Directly\n\nWhen a user makes a prediction to your app, you may need the underlying network request, in order to get the request headers (e.g. for advanced authentication), log the client's IP address, or for other reasons. Gradio supports this in a similar manner to FastAPI: simply add a function parameter whose type hint is `gr.Request` and Gradio will pass in the network request as that parameter. Here is an example:\n\n```python\nimport gradio as gr\n\ndef echo(name, request: gr.Request):\n if request:\n print(\"Request headers dictionary:\", request.headers)\n print(\"IP address:\", request.client.host)\n return name\n\nio = gr.Interface(echo, \"textbox\", \"textbox\").launch()\n```\n\nNote: if your function is called directly instead of through the UI (this happens, for \nexample, when examples are cached), then `request` will be `None`. You should handle\nthis case explicitly to ensure that your app does not throw any errors. That is why\nwe have the explicit check `if request`.\n\n## Mounting Within Another FastAPI App\n\nIn some cases, you might have an existing FastAPI app, and you'd like to add a path for a Gradio demo.\nYou can easily do this with `gradio.mount_gradio_app()`.\n\nHere's a complete example:\n\n```python\nfrom fastapi import FastAPI\nimport gradio as gr\n\nCUSTOM_PATH = \"/gradio\"\n\napp = FastAPI()\n\n\n@app.get(\"/\")\ndef read_main():\n return {\"message\": \"This is your main app\"}\n\n\nio = gr.Interface(lambda x: \"Hello, \" + x + \"!\", \"textbox\", \"textbox\")\napp = gr.mount_gradio_app(app, io, path=CUSTOM_PATH)\n\n\n# Run this from the terminal as you would normally start a FastAPI app: `uvicorn run:app`\n# and navigate to http://localhost:8000/gradio in your browser.\n\n```\n\nNote that this approach also allows you run your Gradio apps on custom paths (`http://localhost:8000/gradio` in the example above).\n\n## Security and File Access\n\nSharing your Gradio app with others (by hosting it on Spaces, on your own server, or through temporary share links) **exposes** certain files on the host machine to users of your Gradio app. \n\nIn particular, Gradio apps ALLOW users to access to three kinds of files:\n\n* **Files in the same directory (or a subdirectory) of where the Gradio script is launched from.** For example, if the path to your gradio scripts is `/home/usr/scripts/project/app.py` and you launch it from `/home/usr/scripts/project/`, then users of your shared Gradio app will be able to access any files inside `/home/usr/scripts/project/`. This is done so that you can easily reference these files in your Gradio app (e.g. for your app's `examples`).\n\n* **Temporary files created by Gradio.** These are files that are created by Gradio as part of running your prediction function. For example, if your prediction function returns a video file, then Gradio will save that video to a temporary file and then send the path to the temporary file to the front end. 
You can customize the location of temporary files created by Gradio by setting the environment variable `GRADIO_TEMP_DIR` to an absolute path, such as `/home/usr/scripts/project/temp/`.\n\n* **Files that you explicitly allow via the `allowed_paths` parameter in `launch()`**. This parameter allows you to pass in a list of additional directories or exact filepaths you'd like to allow users to have access to. (By default, this parameter is an empty list).\n\nGradio DOES NOT ALLOW access to:\n\n* **Dotfiles** (any files whose name begins with `'.'`) or any files that are contained in any directory whose name begins with `'.'`\n\n* **Files that you explicitly block via the `blocked_paths` parameter in `launch()`**. You can pass in a list of additional directories or exact filepaths to the `blocked_paths` parameter in `launch()`. This parameter takes precedence over the files that Gradio exposes by default or by the `allowed_paths`.\n\n* **Any other paths on the host machine**. Users should NOT be able to access other arbitrary paths on the host. \n\nPlease make sure you are running the latest version of `gradio` for these security settings to apply. ", "html": "

Sharing Your App

\n\n

How to share your Gradio app:

\n\n
    \n
  1. Sharing demos with the share parameter
  2. \n
  3. Hosting on HF Spaces
  4. \n
  5. Embedding hosted spaces
  6. \n
  7. Embedding with web components
  8. \n
  9. Using the API page
  10. \n
  11. Adding authentication to the page
  12. \n
  13. Accessing Network Requests
  14. \n
  15. Mounting within FastAPI
  16. \n
  17. Security
  18. \n
\n\n

Sharing Demos

\n\n

Gradio demos can be easily shared publicly by setting share=True in the launch() method. Like this:

\n\n
demo.launch(share=True)\n
\n\n

This generates a public, shareable link that you can send to anybody! When you send this link, the user on the other side can try out the model in their browser. Because the processing happens on your device (as long as your device stays on!), you don't have to worry about packaging any dependencies. A share link usually looks something like this: XXXXX.gradio.app. Although the link is served through a Gradio URL, we are only a proxy for your local server, and do not store any data sent through your app.

\n\n

Keep in mind, however, that these links are publicly accessible, meaning that anyone can use your model for prediction! Therefore, make sure not to expose any sensitive information through the functions you write, or allow any critical changes to occur on your device. If you set share=False (the default, except in colab notebooks), only a local link is created, which can be shared by port-forwarding with specific users.

\n\n

\"sharing\"

\n\n

Share links expire after 72 hours.

\n\n

Hosting on HF Spaces

\n\n

If you'd like to have a permanent link to your Gradio demo on the internet, use Hugging Face Spaces. Hugging Face Spaces provides the infrastructure to permanently host your machine learning model for free!

\n\n

After you have created a free Hugging Face account, you have three methods to deploy your Gradio app to Hugging Face Spaces:

\n\n
    \n
  1. From terminal: run gradio deploy in your app directory. The CLI will gather some basic metadata and then launch your app. To update your space, you can re-run this command or enable the Github Actions option to automatically update the Spaces on git push.

  2. \n
  3. From your browser: Drag and drop a folder containing your Gradio model and all related files here.

  4. \n
  5. Connect Spaces with your Git repository and Spaces will pull the Gradio app from there. See this guide on how to host on Hugging Face Spaces for more information.

  6. \n
\n\n

\n\n

Note: Some components, like gr.Image, will display a \"Share\" button only on Spaces, so that users can share the generated output to the Discussions page of the Space easily. You can disable this with show_share_button, such as gr.Image(show_share_button=False).

\n\n

"Image with show_share_button=True"

\n\n

Embedding Hosted Spaces

\n\n

Once you have hosted your app on Hugging Face Spaces (or on your own server), you may want to embed the demo on a different website, such as your blog or your portfolio. Embedding an interactive demo allows people to try out the machine learning model that you have built, without needing to download or install anything \u2014 right in their browser! The best part is that you can embed interactive demos even in static websites, such as GitHub pages.

\n\n

There are two ways to embed your Gradio demos. You can find quick links to both options directly on the Hugging Face Space page, in the \"Embed this Space\" dropdown option:

\n\n

"Embed this Space dropdown option"

\n\n

Embedding with Web Components

\n\n

Web components typically offer a better experience to users than IFrames. Web components load lazily, meaning that they won't slow down the loading time of your website, and they automatically adjust their height based on the size of the Gradio app.

\n\n

To embed with Web Components:

\n\n
    \n
  1. Import the gradio JS library into your site by adding the script below (replace {GRADIO_VERSION} in the URL with the library version of Gradio you are using).
  2. \n
\n\n
\n
\n\n
    \n
  1. Add
  2. \n
\n\n
\n
\n\n

element where you want to place the app. Set the src= attribute to your Space's embed URL, which you can find in the \"Embed this Space\" button. For example:

\n\n
\n
\n\n\n\n

You can see examples of how web components look on the Gradio landing page.

\n\n

You can also customize the appearance and behavior of your web component with attributes that you pass into the <gradio-app> tag:

\n\n
    \n
  • src: as we've seen, the src attribute links to the URL of the hosted Gradio demo that you would like to embed
  • \n
  • space: an optional shorthand if your Gradio demo is hosted on Hugging Face Space. Accepts a username/space_name instead of a full URL. Example: gradio/Echocardiogram-Segmentation. If this attribute is provided, then src does not need to be provided.
  • \n
  • control_page_title: a boolean designating whether the html title of the page should be set to the title of the Gradio app (by default \"false\")
  • \n
  • initial_height: the initial height of the web component while it is loading the Gradio app (by default \"300px\"). Note that the final height is set based on the size of the Gradio app.
  • \n
  • container: whether to show the border frame and information about where the Space is hosted (by default \"true\")
  • \n
  • info: whether to show just the information about where the Space is hosted underneath the embedded app (by default \"true\")
  • \n
  • autoscroll: whether to autoscroll to the output when prediction has finished (by default \"false\")
  • \n
  • eager: whether to load the Gradio app as soon as the page loads (by default \"false\")
  • \n
  • theme_mode: whether to use the dark, light, or default system theme mode (by default \"system\")
  • \n
\n\n

Here's an example of how to use these attributes to create a Gradio app that does not lazy load and has an initial height of 0px.

\n\n
\n
\n\n

Note: While Gradio's CSS will never impact the embedding page, the embedding page can affect the style of the embedded Gradio app. Make sure that any CSS in the parent page isn't so general that it could also apply to the embedded Gradio app and cause the styling to break. Element selectors such as header { ... } and footer { ... } will be the most likely to cause issues.

\n\n

Embedding with IFrames

\n\n

To embed with IFrames instead (if you cannot add javascript to your website, for example), add this element:

\n\n
\n
\n\n

Again, set the src= attribute to your Space's embed URL, which you can find in the \"Embed this Space\" button.

\n\n

Note: if you use IFrames, you'll probably want to add a fixed height attribute and set style=\"border:0;\" to remove the border. In addition, if your app requires permissions such as access to the webcam or the microphone, you'll need to provide that as well using the allow attribute.

\n\n

API Page

\n\n

You can use almost any Gradio app as an API! In the footer of a Gradio app like this one, you'll see a \"Use via API\" link.

\n\n

"Use via API"

\n\n

This is a page that lists the endpoints that can be used to query the Gradio app, via our supported clients: either the Python client, or the JavaScript client. For each endpoint, Gradio automatically generates the parameters and their types, as well as example inputs.

\n\n

The endpoints are automatically created when you launch a Gradio Interface. If you are using Gradio Blocks, you can also set up a Gradio API page, though we recommend that you explicitly name each event listener, such as

\n\n
btn.click(add, [num1, num2], output, api_name=\"addition\")\n
\n\n

This will add and document the endpoint /api/addition/ to the automatically generated API page. Otherwise, your API endpoints will appear as \"unnamed\" endpoints.

\n\n
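As a sketch, a named endpoint like the one above could then be queried with the Python client (the URL is a placeholder for wherever your app is running):
\n\n
from gradio_client import Client\n\nclient = Client(\"http://localhost:7860/\")  # placeholder URL of your running app\nresult = client.predict(3, 5, api_name=\"/addition\")\nprint(result)\n
\n\n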

Note: For Gradio apps in which queueing is enabled, users can bypass the queue if they make a POST request to your API endpoint. To disable this behavior, set api_open=False in the queue() method. To disable the API page altogether, set show_api=False in .launch().

\n\n
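A minimal sketch of both options:
\n\n
import gradio as gr\n\ndemo = gr.Interface(lambda name: \"Hello \" + name, \"textbox\", \"textbox\")\ndemo.queue(api_open=False)   # direct POST requests can no longer bypass the queue\ndemo.launch(show_api=False)  # hide the \"Use via API\" page altogether\n
\n\n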

Authentication

\n\n

You may wish to put an authentication page in front of your app to limit who can open your app. With the auth= keyword argument in the launch() method, you can provide a tuple with a username and password, or a list of acceptable username/password tuples. Here's an example that provides password-based authentication for a single user named \"admin\":

\n\n
demo.launch(auth=(\"admin\", \"pass1234\"))\n
\n\n

For more complex authentication handling, you can even pass a function that takes a username and password as arguments, and returns True to allow authentication, False otherwise. This can be used for, among other things, making requests to 3rd-party authentication services.

\n\n

Here's an example of a function that accepts any login where the username and password are the same:

\n\n
def same_auth(username, password):\n    return username == password\ndemo.launch(auth=same_auth)\n
\n\n

For authentication to work properly, third party cookies must be enabled in your browser.\nThis is not the case by default for Safari or Chrome Incognito Mode.

\n\n

Accessing the Network Request Directly

\n\n

When a user makes a prediction to your app, you may need the underlying network request, in order to get the request headers (e.g. for advanced authentication), log the client's IP address, or for other reasons. Gradio supports this in a similar manner to FastAPI: simply add a function parameter whose type hint is gr.Request and Gradio will pass in the network request as that parameter. Here is an example:

\n\n
import gradio as gr\n\ndef echo(name, request: gr.Request):\n    if request:\n        print(\"Request headers dictionary:\", request.headers)\n        print(\"IP address:\", request.client.host)\n    return name\n\nio = gr.Interface(echo, \"textbox\", \"textbox\").launch()\n
\n\n

Note: if your function is called directly instead of through the UI (this happens, for \nexample, when examples are cached), then request will be None. You should handle\nthis case explicitly to ensure that your app does not throw any errors. That is why\nwe have the explicit check if request.

\n\n

Mounting Within Another FastAPI App

\n\n

In some cases, you might have an existing FastAPI app, and you'd like to add a path for a Gradio demo.\nYou can easily do this with gradio.mount_gradio_app().

\n\n

Here's a complete example:

\n\n
from fastapi import FastAPI\nimport gradio as gr\n\nCUSTOM_PATH = \"/gradio\"\n\napp = FastAPI()\n\n\n@app.get(\"/\")\ndef read_main():\n    return {\"message\": \"This is your main app\"}\n\n\nio = gr.Interface(lambda x: \"Hello, \" + x + \"!\", \"textbox\", \"textbox\")\napp = gr.mount_gradio_app(app, io, path=CUSTOM_PATH)\n\n\n# Run this from the terminal as you would normally start a FastAPI app: `uvicorn run:app`\n# and navigate to http://localhost:8000/gradio in your browser.\n\n
\n\n

Note that this approach also allows you to run your Gradio apps on custom paths (http://localhost:8000/gradio in the example above).

\n\n

Security and File Access

\n\n

Sharing your Gradio app with others (by hosting it on Spaces, on your own server, or through temporary share links) exposes certain files on the host machine to users of your Gradio app.

\n\n

In particular, Gradio apps ALLOW users to access three kinds of files:

\n\n
    \n
  • Files in the same directory (or a subdirectory) of where the Gradio script is launched from. For example, if the path to your gradio scripts is /home/usr/scripts/project/app.py and you launch it from /home/usr/scripts/project/, then users of your shared Gradio app will be able to access any files inside /home/usr/scripts/project/. This is done so that you can easily reference these files in your Gradio app (e.g. for your app's examples).

  • \n
  • Temporary files created by Gradio. These are files that are created by Gradio as part of running your prediction function. For example, if your prediction function returns a video file, then Gradio will save that video to a temporary file and then send the path to the temporary file to the front end. You can customize the location of temporary files created by Gradio by setting the environment variable GRADIO_TEMP_DIR to an absolute path, such as /home/usr/scripts/project/temp/.

  • \n
  • Files that you explicitly allow via the allowed_paths parameter in launch(). This parameter allows you to pass in a list of additional directories or exact filepaths you'd like to allow users to have access to. (By default, this parameter is an empty list).

  • \n
\n\n

Gradio DOES NOT ALLOW access to:

\n\n
    \n
  • Dotfiles (any files whose name begins with '.') or any files that are contained in any directory whose name begins with '.'

  • \n
  • Files that you explicitly block via the blocked_paths parameter in launch(). You can pass in a list of additional directories or exact filepaths to the blocked_paths parameter in launch(). This parameter takes precedence over the files that Gradio exposes by default or by the allowed_paths.

  • \n
  • Any other paths on the host machine. Users should NOT be able to access other arbitrary paths on the host.

  • \n
\n\n

Please make sure you are running the latest version of gradio for these security settings to apply.
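\n\n
To make the allowed_paths and blocked_paths rules above concrete, here is a minimal launch sketch (the paths are placeholders):
\n\n
import gradio as gr\n\ndemo = gr.Interface(lambda name: \"Hello \" + name, \"textbox\", \"textbox\")\ndemo.launch(\n    allowed_paths=[\"/home/usr/shared_assets\"],          # placeholder: an extra directory users may access\n    blocked_paths=[\"/home/usr/shared_assets/private\"],  # placeholder: takes precedence over allowed_paths\n)\n
\n\n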

\n", "tags": [], "spaces": [], "url": "/guides/sharing-your-app/", "contributor": null}], "parent": "gradio"}, "interface": {"class": null, "name": "Interface", "description": "Interface is Gradio's main high-level class, and allows you to create a web-based GUI / demo around a machine learning model (or any Python function) in a few lines of code. You must specify three parameters: (1) the function to create a GUI for (2) the desired input components and (3) the desired output components. Additional parameters can be used to control the appearance and behavior of the demo.
", "tags": {"demos": "hello_world, hello_world_3, gpt2_xl", "guides": "quickstart, key-features, sharing-your-app, interface-state, reactive-interfaces, advanced-interface-features, setting-up-a-gradio-demo-for-maximum-performance"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "fn", "annotation": "Callable", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "str | IOComponent | list[str | IOComponent] | None", "doc": "a single Gradio component, or list of Gradio components. Components can either be passed as instantiated objects, or referred to by their string shortcuts. The number of input components should match the number of parameters in fn. If set to None, then only the output components will be displayed."}, {"name": "outputs", "annotation": "str | IOComponent | list[str | IOComponent] | None", "doc": "a single Gradio component, or list of Gradio components. Components can either be passed as instantiated objects, or referred to by their string shortcuts. The number of output components should match the number of values returned by fn. If set to None, then only the input components will be displayed."}, {"name": "examples", "annotation": "list[Any] | list[list[Any]] | str | None", "doc": "sample inputs for the function; if provided, appear below the UI components and can be clicked to populate the interface. Should be nested list, in which the outer list consists of samples and each inner list consists of an input corresponding to each input component. A string path to a directory of examples can also be provided, but it should be within the directory with the python file running the gradio app. If there are multiple input components and a directory is provided, a log.csv file must be present in the directory to link corresponding inputs.", "default": "None"}, {"name": "cache_examples", "annotation": "bool | None", "doc": "If True, caches examples in the server for fast runtime in examples. If `fn` is a generator function, then the last yielded value will be used as the output. The default option in HuggingFace Spaces is True. The default option elsewhere is False.", "default": "None"}, {"name": "examples_per_page", "annotation": "int", "doc": "If examples are provided, how many to display per page.", "default": "10"}, {"name": "live", "annotation": "bool", "doc": "whether the interface should automatically rerun if any of the inputs change.", "default": "False"}, {"name": "interpretation", "annotation": "Callable | str | None", "doc": "function that provides interpretation explaining prediction output. Pass \"default\" to use simple built-in interpreter, \"shap\" to use a built-in shapley-based interpreter, or your own custom interpretation function. For more information on the different interpretation methods, see the Advanced Interface Features guide.", "default": "None"}, {"name": "num_shap", "annotation": "float", "doc": "a multiplier that determines how many examples are computed for shap-based interpretation. Increasing this value will increase shap runtime, but improve results. 
Only applies if interpretation is \"shap\".", "default": "2.0"}, {"name": "title", "annotation": "str | None", "doc": "a title for the interface; if provided, appears above the input and output components in large font. Also used as the tab title when opened in a browser window.", "default": "None"}, {"name": "description", "annotation": "str | None", "doc": "a description for the interface; if provided, appears above the input and output components and beneath the title in regular font. Accepts Markdown and HTML content.", "default": "None"}, {"name": "article", "annotation": "str | None", "doc": "an expanded article explaining the interface; if provided, appears below the input and output components in regular font. Accepts Markdown and HTML content.", "default": "None"}, {"name": "thumbnail", "annotation": "str | None", "doc": "path or url to image to use as display image when the web demo is shared on social media.", "default": "None"}, {"name": "theme", "annotation": "Theme | str | None", "doc": "Theme to use, loaded from gradio.themes.", "default": "None"}, {"name": "css", "annotation": "str | None", "doc": "custom css or path to custom css file to use with interface.", "default": "None"}, {"name": "allow_flagging", "annotation": "str | None", "doc": "one of \"never\", \"auto\", or \"manual\". If \"never\" or \"auto\", users will not see a button to flag an input and output. If \"manual\", users will see a button to flag. If \"auto\", every input the user submits will be automatically flagged (outputs are not flagged). If \"manual\", both the input and outputs are flagged when the user clicks flag button. This parameter can be set with environmental variable GRADIO_ALLOW_FLAGGING; otherwise defaults to \"manual\".", "default": "None"}, {"name": "flagging_options", "annotation": "list[str] | list[tuple[str, str]] | None", "doc": "if provided, allows user to select from the list of options when flagging. Only applies if allow_flagging is \"manual\". Can either be a list of tuples of the form (label, value), where label is the string that will be displayed on the button and value is the string that will be stored in the flagging CSV; or it can be a list of strings [\"X\", \"Y\"], in which case the values will be the list of strings and the labels will [\"Flag as X\", \"Flag as Y\"], etc.", "default": "None"}, {"name": "flagging_dir", "annotation": "str", "doc": "what to name the directory where flagged data is stored.", "default": "\"flagged\""}, {"name": "flagging_callback", "annotation": "FlaggingCallback", "doc": "An instance of a subclass of FlaggingCallback which will be called when a sample is flagged. By default logs to a local CSV file.", "default": "CSVLogger()"}, {"name": "analytics_enabled", "annotation": "bool | None", "doc": "Whether to allow basic telemetry. If None, will use GRADIO_ANALYTICS_ENABLED environment variable if defined, or default to True.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "allow_duplication", "annotation": "bool", "doc": "If True, then will show a 'Duplicate Spaces' button on Hugging Face Spaces.", "default": "False"}], "returns": {"annotation": null}, "example": "import gradio as gr\n\ndef image_classifier(inp):\n return {'cat': 0.3, 'dog': 0.7}\n\ndemo = gr.Interface(fn=image_classifier, inputs=\"image\", outputs=\"label\")\ndemo.launch()", "fns": [{"fn": null, "name": "launch", "description": "Launches a simple web server that serves the demo. Can also be used to create a public link used by anyone to access the demo from their browser by setting share=True.
", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "inline", "annotation": "bool | None", "doc": "whether to display in the interface inline in an iframe. Defaults to True in python notebooks; False otherwise.", "default": "None"}, {"name": "inbrowser", "annotation": "bool", "doc": "whether to automatically launch the interface in a new tab on the default browser.", "default": "False"}, {"name": "share", "annotation": "bool | None", "doc": "whether to create a publicly shareable link for the interface. Creates an SSH tunnel to make your UI accessible from anywhere. If not provided, it is set to False by default every time, except when running in Google Colab. When localhost is not accessible (e.g. Google Colab), setting share=False is not supported.", "default": "None"}, {"name": "debug", "annotation": "bool", "doc": "if True, blocks the main thread from running. If running in Google Colab, this is needed to print the errors in the cell output.", "default": "False"}, {"name": "enable_queue", "annotation": "bool | None", "doc": "DEPRECATED (use .queue() method instead.) if True, inference requests will be served through a queue instead of with parallel threads. Required for longer inference times (> 1min) to prevent timeout. The default option in HuggingFace Spaces is True. The default option elsewhere is False.", "default": "None"}, {"name": "max_threads", "annotation": "int", "doc": "the maximum number of total threads that the Gradio app can generate in parallel. The default is inherited from the starlette library (currently 40). Applies whether the queue is enabled or not. But if queuing is enabled, this parameter is increaseed to be at least the concurrency_count of the queue.", "default": "40"}, {"name": "auth", "annotation": "Callable | tuple[str, str] | list[tuple[str, str]] | None", "doc": "If provided, username and password (or list of username-password tuples) required to access interface. Can also provide function that takes username and password and returns True if valid login.", "default": "None"}, {"name": "auth_message", "annotation": "str | None", "doc": "If provided, HTML message provided on login page.", "default": "None"}, {"name": "prevent_thread_lock", "annotation": "bool", "doc": "If True, the interface will block the main thread while the server is running.", "default": "False"}, {"name": "show_error", "annotation": "bool", "doc": "If True, any errors in the interface will be displayed in an alert modal and printed in the browser console log", "default": "False"}, {"name": "server_name", "annotation": "str | None", "doc": "to make app accessible on local network, set this to \"0.0.0.0\". Can be set by environment variable GRADIO_SERVER_NAME. If None, will use \"127.0.0.1\".", "default": "None"}, {"name": "server_port", "annotation": "int | None", "doc": "will start gradio app on this port (if available). Can be set by environment variable GRADIO_SERVER_PORT. 
If None, will search for an available port starting at 7860.", "default": "None"}, {"name": "show_tips", "annotation": "bool", "doc": "if True, will occasionally show tips about new Gradio features", "default": "False"}, {"name": "height", "annotation": "int", "doc": "The height in pixels of the iframe element containing the interface (used if inline=True)", "default": "500"}, {"name": "width", "annotation": "int | str", "doc": "The width in pixels of the iframe element containing the interface (used if inline=True)", "default": "\"100%\""}, {"name": "encrypt", "annotation": "bool | None", "doc": "DEPRECATED. Has no effect.", "default": "None"}, {"name": "favicon_path", "annotation": "str | None", "doc": "If a path to a file (.png, .gif, or .ico) is provided, it will be used as the favicon for the web page.", "default": "None"}, {"name": "ssl_keyfile", "annotation": "str | None", "doc": "If a path to a file is provided, will use this as the private key file to create a local server running on https.", "default": "None"}, {"name": "ssl_certfile", "annotation": "str | None", "doc": "If a path to a file is provided, will use this as the signed certificate for https. Needs to be provided if ssl_keyfile is provided.", "default": "None"}, {"name": "ssl_keyfile_password", "annotation": "str | None", "doc": "If a password is provided, will use this with the ssl certificate for https.", "default": "None"}, {"name": "ssl_verify", "annotation": "bool", "doc": "If False, skips certificate validation which allows self-signed certificates to be used.", "default": "True"}, {"name": "quiet", "annotation": "bool", "doc": "If True, suppresses most print statements.", "default": "False"}, {"name": "show_api", "annotation": "bool", "doc": "If True, shows the api docs in the footer of the app. Default True. If the queue is enabled, then api_open parameter of .queue() will determine if the api docs are shown, independent of the value of show_api.", "default": "True"}, {"name": "file_directories", "annotation": "list[str] | None", "doc": "This parameter has been renamed to `allowed_paths`. It will be removed in a future version.", "default": "None"}, {"name": "allowed_paths", "annotation": "list[str] | None", "doc": "List of complete filepaths or parent directories that gradio is allowed to serve (in addition to the directory containing the gradio python file). Must be absolute paths. Warning: if you provide directories, any files in these directories or their subdirectories are accessible to all users of your app.", "default": "None"}, {"name": "blocked_paths", "annotation": "list[str] | None", "doc": "List of complete filepaths or parent directories that gradio is not allowed to serve (i.e. users of your app are not allowed to access). Must be absolute paths. Warning: takes precedence over `allowed_paths` and all other directories exposed by Gradio by default.", "default": "None"}, {"name": "root_path", "annotation": "str | None", "doc": "The root path (or \"mount point\") of the application, if it's not served from the root (\"/\") of the domain. Often used when the application is behind a reverse proxy that forwards requests to the application. For example, if the application is served at \"https://example.com/myapp\", the `root_path` should be set to \"/myapp\". Can be set by environment variable GRADIO_ROOT_PATH. 
Defaults to \"\".", "default": "None"}, {"name": "app_kwargs", "annotation": "dict[str, Any] | None", "doc": "Additional keyword arguments to pass to the underlying FastAPI app as a dictionary of parameter keys and argument values. For example, `{\"docs_url\": \"/docs\"}`", "default": "None"}], "returns": {}, "example": "import gradio as gr\ndef reverse(text):\n return text[::-1]\ndemo = gr.Interface(reverse, \"text\", \"text\")\ndemo.launch(share=True, auth=(\"username\", \"password\"))", "override_signature": null, "parent": "gradio.Interface"}, {"fn": null, "name": "load", "description": "Warning: this method will be deprecated. Use the equivalent `gradio.load()` instead. This is a class method that constructs a Blocks from a Hugging Face repo. Can accept model repos (if src is \"models\") or Space repos (if src is \"spaces\"). The input and output components are automatically loaded from the repo.", "tags": {}, "parameters": [{"name": "name", "annotation": "str", "doc": "the name of the model (e.g. \"gpt2\" or \"facebook/bart-base\") or space (e.g. \"flax-community/spanish-gpt2\"), can include the `src` as prefix (e.g. \"models/facebook/bart-base\")"}, {"name": "src", "annotation": "str | None", "doc": "the source of the model: `models` or `spaces` (or leave empty if source is provided as a prefix in `name`)", "default": "None"}, {"name": "api_key", "annotation": "str | None", "doc": "optional access token for loading private Hugging Face Hub models or spaces. Find your token here: https://huggingface.co/settings/tokens. Warning: only provide this if you are loading a trusted private Space as it can be read by the Space you are loading.", "default": "None"}, {"name": "alias", "annotation": "str | None", "doc": "optional string used as the name of the loaded model instead of the default name (only applies if loading a Space running Gradio 2.x)", "default": "None"}], "returns": {"annotation": "Blocks", "doc": "a Gradio Interface object for the given model"}, "example": null, "override_signature": null, "parent": "gradio.Interface"}, {"fn": null, "name": "from_pipeline", "description": "Class method that constructs an Interface from a Hugging Face transformers.Pipeline object. The input and output components are automatically determined from the pipeline.", "tags": {}, "parameters": [{"name": "pipeline", "annotation": "Pipeline", "doc": "the pipeline object to use."}], "returns": {"annotation": "Interface", "doc": "a Gradio Interface object from the given Pipeline"}, "example": "import gradio as gr\nfrom transformers import pipeline\npipe = pipeline(\"image-classification\")\ngr.Interface.from_pipeline(pipe).launch()", "override_signature": null, "parent": "gradio.Interface"}, {"fn": null, "name": "integrate", "description": "A catch-all method for integrating with other libraries. 
This method should be run after launch()", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "comet_ml", "annotation": "", "doc": "If a comet_ml Experiment object is provided, will integrate with the experiment and appear on Comet dashboard", "default": "None"}, {"name": "wandb", "annotation": "ModuleType | None", "doc": "If the wandb module is provided, will integrate with it and appear on WandB dashboard", "default": "None"}, {"name": "mlflow", "annotation": "ModuleType | None", "doc": "If the mlflow module is provided, will integrate with the experiment and appear on ML Flow dashboard", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Interface"}, {"fn": null, "name": "queue", "description": "You can control the rate of processed requests by creating a queue. This will allow you to set the number of requests to be processed at one time, and will let users know their position in the queue.", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "concurrency_count", "annotation": "int", "doc": "Number of worker threads that will be processing requests from the queue concurrently. Increasing this number will increase the rate at which requests are processed, but will also increase the memory usage of the queue.", "default": "1"}, {"name": "status_update_rate", "annotation": "float | Literal['auto']", "doc": "If \"auto\", Queue will send status estimations to all clients whenever a job is finished. Otherwise Queue will send status at regular intervals set by this parameter as the number of seconds.", "default": "\"auto\""}, {"name": "client_position_to_load_data", "annotation": "int | None", "doc": "DEPRECATED. This parameter is deprecated and has no effect.", "default": "None"}, {"name": "default_enabled", "annotation": "bool | None", "doc": "Deprecated and has no effect.", "default": "None"}, {"name": "api_open", "annotation": "bool", "doc": "If True, the REST routes of the backend will be open, allowing requests made directly to those endpoints to skip the queue.", "default": "True"}, {"name": "max_size", "annotation": "int | None", "doc": "The maximum number of events the queue will store at any given moment. If the queue is full, new events will not be added and a user will receive a message saying that the queue is full. If None, the queue size will be unlimited.", "default": "None"}], "returns": {}, "example": "demo = gr.Interface(image_generator, gr.Textbox(), gr.Image())\ndemo.queue(concurrency_count=3)\ndemo.launch()", "override_signature": null, "parent": "gradio.Interface"}], "demos": [["hello_world", "import gradio as gr\n\ndef greet(name):\n return \"Hello \" + name + \"!\"\n\ndemo = gr.Interface(fn=greet, inputs=\"text\", outputs=\"text\")\n \nif __name__ == \"__main__\":\n demo.launch() "], ["hello_world_3", "import gradio as gr\n\ndef greet(name, is_morning, temperature):\n salutation = \"Good morning\" if is_morning else \"Good evening\"\n greeting = f\"{salutation} {name}. 
It is {temperature} degrees today\"\n celsius = (temperature - 32) * 5 / 9\n return greeting, round(celsius, 2)\n\ndemo = gr.Interface(\n fn=greet,\n inputs=[\"text\", \"checkbox\", gr.Slider(0, 100)],\n outputs=[\"text\", \"number\"],\n)\nif __name__ == \"__main__\":\n demo.launch()\n"], ["gpt2_xl", "import gradio as gr\n\ntitle = \"gpt2-xl\"\n\nexamples = [\n [\"The tower is 324 metres (1,063 ft) tall,\"],\n [\"The Moon's orbit around Earth has\"],\n [\"The smooth Borealis basin in the Northern Hemisphere covers 40%\"],\n]\n\ndemo = gr.load(\n \"huggingface/gpt2-xl\",\n inputs=gr.Textbox(lines=5, max_lines=6, label=\"Input Text\"),\n title=title,\n examples=examples,\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n"]], "guides": [{"name": "quickstart", "category": "getting-started", "pretty_category": "Getting Started", "guide_index": 1, "absolute_index": 0, "pretty_name": "Quickstart", "content": "# Quickstart\n\n**Prerequisite**: Gradio requires Python 3.8 or higher, that's all!\n\n## What Does Gradio Do?\n\nOne of the *best ways to share* your machine learning model, API, or data science workflow with others is to create an **interactive app** that allows your users or colleagues to try out the demo in their browsers.\n\nGradio allows you to **build demos and share them, all in Python.** And usually in just a few lines of code! So let's get started.\n\n## Hello, World\n\nTo get Gradio running with a simple \"Hello, World\" example, follow these three steps:\n\n1\\. Install Gradio using pip:\n\n```bash\npip install gradio\n```\n\n2\\. Run the code below as a Python script or in a Jupyter Notebook (or [Google Colab](https://colab.research.google.com/drive/18ODkJvyxHutTN0P5APWyGFO_xwNcgHDZ?usp=sharing)):\n\n```python\nimport gradio as gr\n\ndef greet(name):\n return \"Hello \" + name + \"!\"\n\ndemo = gr.Interface(fn=greet, inputs=\"text\", outputs=\"text\")\n \ndemo.launch() \n```\n\nWe shorten the imported name to `gr` for better readability of code using Gradio. This is a widely adopted convention that you should follow so that anyone working with your code can easily understand it.\n\n3\\. The demo below will appear automatically within the Jupyter Notebook, or pop in a browser on [http://localhost:7860](http://localhost:7860) if running from a script:\n\n\n\nWhen developing locally, if you want to run the code as a Python script, you can use the Gradio CLI to launch the application **in reload mode**, which will provide seamless and fast development. Learn more about reloading in the [Auto-Reloading Guide](https://gradio.app/developing-faster-with-reload-mode/).\n\n```bash\ngradio app.py\n```\n\nNote: you can also do `python app.py`, but it won't provide the automatic reload mechanism.\n\n## The `Interface` Class\n\nYou'll notice that in order to make the demo, we created a `gr.Interface`. This `Interface` class can wrap any Python function with a user interface. In the example above, we saw a simple text-based function, but the function could be anything from music generator to a tax calculator to the prediction function of a pretrained machine learning model.\n\nThe core `Interface` class is initialized with three required parameters:\n\n- `fn`: the function to wrap a UI around\n- `inputs`: which component(s) to use for the input (e.g. `\"text\"`, `\"image\"` or `\"audio\"`)\n- `outputs`: which component(s) to use for the output (e.g. 
`\"text\"`, `\"image\"` or `\"label\"`)\n\nLet's take a closer look at these components used to provide input and output.\n\n## Components Attributes\n\nWe saw some simple `Textbox` components in the previous examples, but what if you want to change how the UI components look or behave?\n\nLet's say you want to customize the input text field \u2014 for example, you wanted it to be larger and have a text placeholder. If we use the actual class for `Textbox` instead of using the string shortcut, you have access to much more customizability through component attributes.\n\n```python\nimport gradio as gr\n\ndef greet(name):\n return \"Hello \" + name + \"!\"\n\ndemo = gr.Interface(\n fn=greet,\n inputs=gr.Textbox(lines=2, placeholder=\"Name Here...\"),\n outputs=\"text\",\n)\ndemo.launch()\n\n```\n\n\n## Multiple Input and Output Components\n\nSuppose you had a more complex function, with multiple inputs and outputs. In the example below, we define a function that takes a string, boolean, and number, and returns a string and number. Take a look how you pass a list of input and output components.\n\n```python\nimport gradio as gr\n\ndef greet(name, is_morning, temperature):\n salutation = \"Good morning\" if is_morning else \"Good evening\"\n greeting = f\"{salutation} {name}. It is {temperature} degrees today\"\n celsius = (temperature - 32) * 5 / 9\n return greeting, round(celsius, 2)\n\ndemo = gr.Interface(\n fn=greet,\n inputs=[\"text\", \"checkbox\", gr.Slider(0, 100)],\n outputs=[\"text\", \"number\"],\n)\ndemo.launch()\n\n```\n\n\nYou simply wrap the components in a list. Each component in the `inputs` list corresponds to one of the parameters of the function, in order. Each component in the `outputs` list corresponds to one of the values returned by the function, again in order.\n\n## An Image Example\n\nGradio supports many types of components, such as `Image`, `DataFrame`, `Video`, or `Label`. Let's try an image-to-image function to get a feel for these!\n\n```python\nimport numpy as np\nimport gradio as gr\n\ndef sepia(input_img):\n sepia_filter = np.array([\n [0.393, 0.769, 0.189], \n [0.349, 0.686, 0.168], \n [0.272, 0.534, 0.131]\n ])\n sepia_img = input_img.dot(sepia_filter.T)\n sepia_img /= sepia_img.max()\n return sepia_img\n\ndemo = gr.Interface(sepia, gr.Image(shape=(200, 200)), \"image\")\ndemo.launch()\n\n```\n\n\nWhen using the `Image` component as input, your function will receive a NumPy array with the shape `(height, width, 3)`, where the last dimension represents the RGB values. We'll return an image as well in the form of a NumPy array.\n\nYou can also set the datatype used by the component with the `type=` keyword argument. For example, if you wanted your function to take a file path to an image instead of a NumPy array, the input `Image` component could be written as:\n\n```python\ngr.Image(type=\"filepath\", shape=...)\n```\n\nAlso note that our input `Image` component comes with an edit button \ud83d\udd89, which allows for cropping and zooming into images. Manipulating images in this way can help reveal biases or hidden flaws in a machine learning model!\n\nYou can read more about the many components and how to use them in the [Gradio docs](https://gradio.app/docs).\n\n## Chatbots\n\nGradio includes a high-level class, `gr.ChatInterface`, which is similar to `gr.Interface`, but is specifically designed for chatbot UIs. The `gr.ChatInterface` class also wraps a function but this function must have a specific signature. 
The function should take two arguments: `message` and then `history` (the arguments can be named anything, but must be in this order)\n\n* `message`: a `str` representing the user's input\n* `history`: a `list` of `list` representing the conversations up until that point. Each inner list consists of two `str` representing a pair: `[user input, bot response]`. \n\nYour function should return a single string response, which is the bot's response to the particular user input `message`.\n\nOther than that, `gr.ChatInterface` has no required parameters (though several are available for customization of the UI).\n\nHere's a toy example:\n\n```python\nimport random\nimport gradio as gr\n\ndef random_response(message, history):\n return random.choice([\"Yes\", \"No\"])\n\ndemo = gr.ChatInterface(random_response)\n\ndemo.launch()\n\n```\n\n\nYou can [read more about `gr.ChatInterface` here](https://gradio.app/guides/creating-a-chatbot-fast).\n\n## Blocks: More Flexibility and Control\n\nGradio offers two approaches to build apps:\n\n1\\. **Interface** and **ChatInterface**, which provide a high-level abstraction for creating demos that we've been discussing so far.\n\n2\\. **Blocks**, a low-level API for designing web apps with more flexible layouts and data flows. Blocks allows you to do things like feature multiple data flows and demos, control where components appear on the page, handle complex data flows (e.g. outputs can serve as inputs to other functions), and update properties/visibility of components based on user interaction \u2014 still all in Python. If this customizability is what you need, try `Blocks` instead!\n\n## Hello, Blocks\n\nLet's take a look at a simple example. Note how the API here differs from `Interface`.\n\n```python\nimport gradio as gr\n\ndef greet(name):\n return \"Hello \" + name + \"!\"\n\nwith gr.Blocks() as demo:\n name = gr.Textbox(label=\"Name\")\n output = gr.Textbox(label=\"Output Box\")\n greet_btn = gr.Button(\"Greet\")\n greet_btn.click(fn=greet, inputs=name, outputs=output, api_name=\"greet\")\n \n\ndemo.launch()\n```\n\n\nThings to note:\n\n- `Blocks` are made with a `with` clause, and any component created inside this clause is automatically added to the app.\n- Components appear vertically in the app in the order they are created. (Later we will cover customizing layouts!)\n- A `Button` was created, and then a `click` event-listener was added to this button. The API for this should look familiar! Like an `Interface`, the `click` method takes a Python function, input components, and output components.\n\n## More Complexity\n\nHere's an app to give you a taste of what's possible with `Blocks`:\n\n```python\nimport numpy as np\nimport gradio as gr\n\n\ndef flip_text(x):\n return x[::-1]\n\n\ndef flip_image(x):\n return np.fliplr(x)\n\n\nwith gr.Blocks() as demo:\n gr.Markdown(\"Flip text or image files using this demo.\")\n with gr.Tab(\"Flip Text\"):\n text_input = gr.Textbox()\n text_output = gr.Textbox()\n text_button = gr.Button(\"Flip\")\n with gr.Tab(\"Flip Image\"):\n with gr.Row():\n image_input = gr.Image()\n image_output = gr.Image()\n image_button = gr.Button(\"Flip\")\n\n with gr.Accordion(\"Open for More!\"):\n gr.Markdown(\"Look at me...\")\n\n text_button.click(flip_text, inputs=text_input, outputs=text_output)\n image_button.click(flip_image, inputs=image_input, outputs=image_output)\n\ndemo.launch()\n\n```\n\n\nA lot more going on here! 
We'll cover how to create complex `Blocks` apps like this in the [building with blocks](https://gradio.app/building_with_blocks) section for you.\n\nCongrats, you're now familiar with the basics of Gradio! \ud83e\udd73 Go to our [next guide](https://gradio.app/key_features) to learn more about the key features of Gradio.\n", "html": "

Quickstart

\n\n

Prerequisite: Gradio requires Python 3.8 or higher, that's all!

\n\n

What Does Gradio Do?

\n\n

One of the best ways to share your machine learning model, API, or data science workflow with others is to create an interactive app that allows your users or colleagues to try out the demo in their browsers.

\n\n

Gradio allows you to build demos and share them, all in Python. And usually in just a few lines of code! So let's get started.

\n\n

Hello, World

\n\n

To get Gradio running with a simple \"Hello, World\" example, follow these three steps:

\n\n

1. Install Gradio using pip:

\n\n
pip install gradio\n
\n\n

2. Run the code below as a Python script or in a Jupyter Notebook (or Google Colab):

\n\n
import gradio as gr\n\ndef greet(name):\n    return \"Hello \" + name + \"!\"\n\ndemo = gr.Interface(fn=greet, inputs=\"text\", outputs=\"text\")\n\ndemo.launch()   \n
\n\n

We shorten the imported name to gr for better readability of code using Gradio. This is a widely adopted convention that you should follow so that anyone working with your code can easily understand it.

\n\n

3. The demo below will appear automatically within the Jupyter Notebook, or pop in a browser on http://localhost:7860 if running from a script:

\n\n

\n\n

When developing locally, if you want to run the code as a Python script, you can use the Gradio CLI to launch the application in reload mode, which will provide seamless and fast development. Learn more about reloading in the Auto-Reloading Guide.

\n\n
gradio app.py\n
\n\n

Note: you can also do python app.py, but it won't provide the automatic reload mechanism.

\n\n

The Interface Class

\n\n

You'll notice that in order to make the demo, we created a gr.Interface. This Interface class can wrap any Python function with a user interface. In the example above, we saw a simple text-based function, but the function could be anything from a music generator to a tax calculator to the prediction function of a pretrained machine learning model.

\n\n

The core Interface class is initialized with three required parameters:

\n\n
    \n
  • fn: the function to wrap a UI around
  • inputs: which component(s) to use for the input (e.g. \"text\", \"image\" or \"audio\")
  • outputs: which component(s) to use for the output (e.g. \"text\", \"image\" or \"label\")
\n\n

Let's take a closer look at these components used to provide input and output.

\n\n

Components Attributes

\n\n

We saw some simple Textbox components in the previous examples, but what if you want to change how the UI components look or behave?

\n\n

Let's say you want to customize the input text field \u2014 for example, you wanted it to be larger and have a text placeholder. If we use the actual class for Textbox instead of using the string shortcut, you have access to much more customizability through component attributes.

\n\n
import gradio as gr\n\ndef greet(name):\n    return \"Hello \" + name + \"!\"\n\ndemo = gr.Interface(\n    fn=greet,\n    inputs=gr.Textbox(lines=2, placeholder=\"Name Here...\"),\n    outputs=\"text\",\n)\ndemo.launch()\n\n
\n\n

\n\n

Multiple Input and Output Components

\n\n

Suppose you had a more complex function, with multiple inputs and outputs. In the example below, we define a function that takes a string, boolean, and number, and returns a string and number. Take a look at how you pass a list of input and output components.

\n\n
import gradio as gr\n\ndef greet(name, is_morning, temperature):\n    salutation = \"Good morning\" if is_morning else \"Good evening\"\n    greeting = f\"{salutation} {name}. It is {temperature} degrees today\"\n    celsius = (temperature - 32) * 5 / 9\n    return greeting, round(celsius, 2)\n\ndemo = gr.Interface(\n    fn=greet,\n    inputs=[\"text\", \"checkbox\", gr.Slider(0, 100)],\n    outputs=[\"text\", \"number\"],\n)\ndemo.launch()\n\n
\n\n

\n\n

You simply wrap the components in a list. Each component in the inputs list corresponds to one of the parameters of the function, in order. Each component in the outputs list corresponds to one of the values returned by the function, again in order.

\n\n

An Image Example

\n\n

Gradio supports many types of components, such as Image, DataFrame, Video, or Label. Let's try an image-to-image function to get a feel for these!

\n\n
import numpy as np\nimport gradio as gr\n\ndef sepia(input_img):\n    sepia_filter = np.array([\n        [0.393, 0.769, 0.189], \n        [0.349, 0.686, 0.168], \n        [0.272, 0.534, 0.131]\n    ])\n    sepia_img = input_img.dot(sepia_filter.T)\n    sepia_img /= sepia_img.max()\n    return sepia_img\n\ndemo = gr.Interface(sepia, gr.Image(shape=(200, 200)), \"image\")\ndemo.launch()\n\n
\n\n

\n\n

When using the Image component as input, your function will receive a NumPy array with the shape (height, width, 3), where the last dimension represents the RGB values. We'll return an image as well in the form of a NumPy array.

\n\n

You can also set the datatype used by the component with the type= keyword argument. For example, if you wanted your function to take a file path to an image instead of a NumPy array, the input Image component could be written as:

\n\n
gr.Image(type=\"filepath\", shape=...)\n
\n\n

Also note that our input Image component comes with an edit button \ud83d\udd89, which allows for cropping and zooming into images. Manipulating images in this way can help reveal biases or hidden flaws in a machine learning model!

\n\n

You can read more about the many components and how to use them in the Gradio docs.

\n\n

Chatbots

\n\n

Gradio includes a high-level class, gr.ChatInterface, which is similar to gr.Interface, but is specifically designed for chatbot UIs. The gr.ChatInterface class also wraps a function but this function must have a specific signature. The function should take two arguments: message and then history (the arguments can be named anything, but must be in this order)

\n\n
    \n
  • message: a str representing the user's input
  • history: a list of list representing the conversations up until that point. Each inner list consists of two str representing a pair: [user input, bot response].
\n\n

Your function should return a single string response, which is the bot's response to the particular user input message.

\n\n

Other than that, gr.ChatInterface has no required parameters (though several are available for customization of the UI).

\n\n

Here's a toy example:

\n\n
import random\nimport gradio as gr\n\ndef random_response(message, history):\n    return random.choice([\"Yes\", \"No\"])\n\ndemo = gr.ChatInterface(random_response)\n\ndemo.launch()\n\n
\n\n

\n\n

You can read more about gr.ChatInterface here.

\n\n

Blocks: More Flexibility and Control

\n\n

Gradio offers two approaches to build apps:

\n\n

1. Interface and ChatInterface, which provide a high-level abstraction for creating demos that we've been discussing so far.

\n\n

2. Blocks, a low-level API for designing web apps with more flexible layouts and data flows. Blocks allows you to do things like feature multiple data flows and demos, control where components appear on the page, handle complex data flows (e.g. outputs can serve as inputs to other functions), and update properties/visibility of components based on user interaction \u2014 still all in Python. If this customizability is what you need, try Blocks instead!

\n\n

Hello, Blocks

\n\n

Let's take a look at a simple example. Note how the API here differs from Interface.

\n\n
import gradio as gr\n\ndef greet(name):\n    return \"Hello \" + name + \"!\"\n\nwith gr.Blocks() as demo:\n    name = gr.Textbox(label=\"Name\")\n    output = gr.Textbox(label=\"Output Box\")\n    greet_btn = gr.Button(\"Greet\")\n    greet_btn.click(fn=greet, inputs=name, outputs=output, api_name=\"greet\")\n\n\ndemo.launch()\n
\n\n

\n\n

Things to note:

\n\n
    \n
  • Blocks are made with a with clause, and any component created inside this clause is automatically added to the app.
  • Components appear vertically in the app in the order they are created. (Later we will cover customizing layouts!)
  • A Button was created, and then a click event-listener was added to this button. The API for this should look familiar! Like an Interface, the click method takes a Python function, input components, and output components.
\n\n

More Complexity

\n\n

Here's an app to give you a taste of what's possible with Blocks:

\n\n
import numpy as np\nimport gradio as gr\n\n\ndef flip_text(x):\n    return x[::-1]\n\n\ndef flip_image(x):\n    return np.fliplr(x)\n\n\nwith gr.Blocks() as demo:\n    gr.Markdown(\"Flip text or image files using this demo.\")\n    with gr.Tab(\"Flip Text\"):\n        text_input = gr.Textbox()\n        text_output = gr.Textbox()\n        text_button = gr.Button(\"Flip\")\n    with gr.Tab(\"Flip Image\"):\n        with gr.Row():\n            image_input = gr.Image()\n            image_output = gr.Image()\n        image_button = gr.Button(\"Flip\")\n\n    with gr.Accordion(\"Open for More!\"):\n        gr.Markdown(\"Look at me...\")\n\n    text_button.click(flip_text, inputs=text_input, outputs=text_output)\n    image_button.click(flip_image, inputs=image_input, outputs=image_output)\n\ndemo.launch()\n\n
\n\n

\n\n

A lot more going on here! We'll cover how to create complex Blocks apps like this in the building with blocks section for you.

\n\n

Congrats, you're now familiar with the basics of Gradio! \ud83e\udd73 Go to our next guide to learn more about the key features of Gradio.

\n", "tags": [], "spaces": [], "url": "/guides/quickstart/", "contributor": null}, {"name": "key-features", "category": "getting-started", "pretty_category": "Getting Started", "guide_index": 2, "absolute_index": 1, "pretty_name": "Key Features", "content": "# Key Features\n\nLet's go through some of the most popular features of Gradio! Here are Gradio's key features:\n\n1. [Adding example inputs](#example-inputs)\n2. [Passing custom error messages](#alerts)\n3. [Adding descriptive content](#descriptive-content)\n4. [Setting up flagging](#flagging)\n5. [Preprocessing and postprocessing](#preprocessing-and-postprocessing)\n6. [Styling demos](#styling)\n7. [Queuing users](#queuing)\n8. [Iterative outputs](#iterative-outputs)\n9. [Progress bars](#progress-bars)\n10. [Batch functions](#batch-functions)\n11. [Running on collaborative notebooks](#colab-notebooks)\n\n## Example Inputs\n\nYou can provide example data that a user can easily load into `Interface`. This can be helpful to demonstrate the types of inputs the model expects, as well as to provide a way to explore your dataset in conjunction with your model. To load example data, you can provide a **nested list** to the `examples=` keyword argument of the Interface constructor. Each sublist within the outer list represents a data sample, and each element within the sublist represents an input for each input component. The format of example data for each component is specified in the [Docs](https://gradio.app/docs#components).\n\n```python\nimport gradio as gr\n\ndef calculator(num1, operation, num2):\n if operation == \"add\":\n return num1 + num2\n elif operation == \"subtract\":\n return num1 - num2\n elif operation == \"multiply\":\n return num1 * num2\n elif operation == \"divide\":\n if num2 == 0:\n raise gr.Error(\"Cannot divide by zero!\")\n return num1 / num2\n\ndemo = gr.Interface(\n calculator,\n [\n \"number\", \n gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n \"number\"\n ],\n \"number\",\n examples=[\n [5, \"add\", 3],\n [4, \"divide\", 2],\n [-4, \"multiply\", 2.5],\n [0, \"subtract\", 1.2],\n ],\n title=\"Toy Calculator\",\n description=\"Here's a sample toy calculator. Allows you to calculate things like $2+2=4$\",\n)\ndemo.launch()\n\n```\n\n\nYou can load a large dataset into the examples to browse and interact with the dataset through Gradio. The examples will be automatically paginated (you can configure this through the `examples_per_page` argument of `Interface`).\n\nContinue learning about examples in the [More On Examples](https://gradio.app/more-on-examples) guide.\n\n## Alerts\n\nYou wish to pass custom error messages to the user. To do so, raise a `gr.Error(\"custom message\")` to display an error message. If you try to divide by zero in the calculator demo above, a popup modal will display the custom error message. Learn more about Error in the [docs](https://gradio.app/docs#error). \n\nYou can also issue `gr.Warning(\"message\")` and `gr.Info(\"message\")` by having them as standalone lines in your function, which will immediately display modals while continuing the execution of your function. Queueing needs to be enabled for this to work. 
\n\nNote below how the `gr.Error` has to be raised, while the `gr.Warning` and `gr.Info` are single lines.\n\n```python\ndef start_process(name):\n gr.Info(\"Starting process\")\n if name is None:\n gr.Warning(\"Name is empty\")\n ...\n if success == False:\n raise gr.Error(\"Process failed\")\n```\n \n## Descriptive Content\n\nIn the previous example, you may have noticed the `title=` and `description=` keyword arguments in the `Interface` constructor that helps users understand your app.\n\nThere are three arguments in the `Interface` constructor to specify where this content should go:\n\n* `title`: which accepts text and can display it at the very top of interface, and also becomes the page title.\n* `description`: which accepts text, markdown or HTML and places it right under the title.\n* `article`: which also accepts text, markdown or HTML and places it below the interface.\n\n![annotated](https://github.com/gradio-app/gradio/blob/main/guides/assets/annotated.png?raw=true)\n\nIf you're using the `Blocks` API instead, you can insert text, markdown, or HTML anywhere using the `gr.Markdown(...)` or `gr.HTML(...)` components, with descriptive content inside the `Component` constructor.\n\nAnother useful keyword argument is `label=`, which is present in every `Component`. This modifies the label text at the top of each `Component`. You can also add the `info=` keyword argument to form elements like `Textbox` or `Radio` to provide further information on their usage.\n\n```python\ngr.Number(label='Age', info='In years, must be greater than 0')\n```\n\n## Flagging\n\nBy default, an `Interface` will have \"Flag\" button. When a user testing your `Interface` sees input with interesting output, such as erroneous or unexpected model behaviour, they can flag the input for you to review. Within the directory provided by the `flagging_dir=` argument to the `Interface` constructor, a CSV file will log the flagged inputs. If the interface involves file data, such as for Image and Audio components, folders will be created to store those flagged data as well.\n\nFor example, with the calculator interface shown above, we would have the flagged data stored in the flagged directory shown below:\n\n```directory\n+-- calculator.py\n+-- flagged/\n| +-- logs.csv\n```\n\n*flagged/logs.csv*\n\n```csv\nnum1,operation,num2,Output\n5,add,7,12\n6,subtract,1.5,4.5\n```\n\nWith the sepia interface shown earlier, we would have the flagged data stored in the flagged directory shown below:\n\n```directory\n+-- sepia.py\n+-- flagged/\n| +-- logs.csv\n| +-- im/\n| | +-- 0.png\n| | +-- 1.png\n| +-- Output/\n| | +-- 0.png\n| | +-- 1.png\n```\n\n*flagged/logs.csv*\n\n```csv\nim,Output\nim/0.png,Output/0.png\nim/1.png,Output/1.png\n```\n\nIf you wish for the user to provide a reason for flagging, you can pass a list of strings to the `flagging_options` argument of Interface. Users will have to select one of the strings when flagging, which will be saved as an additional column to the CSV.\n\n## Preprocessing and Postprocessing\n\n![](https://github.com/gradio-app/gradio/blob/main/js/_website/src/assets/img/dataflow.svg?raw=true)\n\nAs you've seen, Gradio includes components that can handle a variety of different data types, such as images, audio, and video. 
Most components can be used both as inputs or outputs.\n\nWhen a component is used as an input, Gradio automatically handles the *preprocessing* needed to convert the data from a type sent by the user's browser (such as a base64 representation of a webcam snapshot) to a form that can be accepted by your function (such as a `numpy` array).\n\nSimilarly, when a component is used as an output, Gradio automatically handles the *postprocessing* needed to convert the data from what is returned by your function (such as a list of image paths) to a form that can be displayed in the user's browser (such as a `Gallery` of images in base64 format).\n\nYou can control the *preprocessing* using the parameters when constructing the image component. For example, here if you instantiate the `Image` component with the following parameters, it will convert the image to the `PIL` type and reshape it to be `(100, 100)` no matter the original size that it was submitted as:\n\n```py\nimg = gr.Image(shape=(100, 100), type=\"pil\")\n```\n\nIn contrast, here we keep the original size of the image, but invert the colors before converting it to a numpy array:\n\n```py\nimg = gr.Image(invert_colors=True, type=\"numpy\")\n```\n\nPostprocessing is a lot easier! Gradio automatically recognizes the format of the returned data (e.g. is the `Image` a `numpy` array or a `str` filepath?) and postprocesses it into a format that can be displayed by the browser.\n\nTake a look at the [Docs](https://gradio.app/docs) to see all the preprocessing-related parameters for each Component.\n\n## Styling\n\nGradio themes are the easiest way to customize the look and feel of your app. You can choose from a variety of themes, or create your own. To do so, pass the `theme=` kwarg to the `Interface` constructor. For example:\n\n```python\ndemo = gr.Interface(..., theme=gr.themes.Monochrome())\n```\n\nGradio comes with a set of prebuilt themes which you can load from `gr.themes.*`. You can extend these themes or create your own themes from scratch - see the [Theming guide](https://gradio.app/guides/theming-guide) for more details.\n\nFor additional styling ability, you can pass any CSS to your app using the `css=` kwarg.\nThe base class for the Gradio app is `gradio-container`, so here's an example that changes the background color of the Gradio app:\n\n```python\nwith gr.Interface(css=\".gradio-container {background-color: red}\") as demo:\n ...\n```\n\nSome components can be additionally styled through the `style()` method. For example:\n\n```python\nimg = gr.Image(\"lion.jpg\").style(height='24', rounded=False)\n```\n\nTake a look at the [Docs](https://gradio.app/docs) to see all the styling options for each Component.\n\n## Queuing\n\nIf your app expects heavy traffic, use the `queue()` method to control processing rate. This will queue up calls so only a certain number of requests are processed at a single time. 
Queueing uses websockets, which also prevent network timeouts, so you should use queueing if the inference time of your function is long (> 1min).\n\nWith `Interface`:\n\n```python\ndemo = gr.Interface(...).queue()\ndemo.launch()\n```\n\nWith `Blocks`:\n\n```python\nwith gr.Blocks() as demo:\n #...\ndemo.queue()\ndemo.launch()\n```\n\nYou can control the number of requests processed at a single time as such:\n\n```python\ndemo.queue(concurrency_count=3)\n```\n\nSee the [Docs on queueing](/docs/#queue) on configuring other queuing parameters.\n\nTo specify only certain functions for queueing in Blocks:\n\n```python\nwith gr.Blocks() as demo2:\n num1 = gr.Number()\n num2 = gr.Number()\n output = gr.Number()\n gr.Button(\"Add\").click(\n lambda a, b: a + b, [num1, num2], output)\n gr.Button(\"Multiply\").click(\n lambda a, b: a * b, [num1, num2], output, queue=True)\ndemo2.launch()\n```\n\n## Iterative Outputs\n\nIn some cases, you may want to stream a sequence of outputs rather than show a single output at once. For example, you might have an image generation model and you want to show the image that is generated at each step, leading up to the final image. Or you might have a chatbot which streams its response one word at a time instead of returning it all at once.\n\nIn such cases, you can supply a **generator** function into Gradio instead of a regular function. Creating generators in Python is very simple: instead of a single `return` value, a function should `yield` a series of values instead. Usually the `yield` statement is put in some kind of loop. Here's an example of an generator that simply counts up to a given number:\n\n```python\ndef my_generator(x):\n for i in range(x):\n yield i\n```\n\nYou supply a generator into Gradio the same way as you would a regular function. For example, here's a a (fake) image generation model that generates noise for several steps before outputting an image:\n\n```python\nimport gradio as gr\nimport numpy as np\nimport time\n\n# define core fn, which returns a generator {steps} times before returning the image\ndef fake_diffusion(steps):\n for _ in range(steps):\n time.sleep(1)\n image = np.random.random((600, 600, 3))\n yield image\n image = \"https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg\"\n yield image\n\n\ndemo = gr.Interface(fake_diffusion, inputs=gr.Slider(1, 10, 3), outputs=\"image\")\n\n# define queue - required for generators\ndemo.queue()\n\ndemo.launch()\n\n```\n\n\nNote that we've added a `time.sleep(1)` in the iterator to create an artificial pause between steps so that you are able to observe the steps of the iterator (in a real image generation model, this probably wouldn't be necessary).\n\nSupplying a generator into Gradio **requires** you to enable queuing in the underlying Interface or Blocks (see the queuing section above).\n\n## Progress Bars\n\nGradio supports the ability to create a custom Progress Bars so that you have customizability and control over the progress update that you show to the user. In order to enable this, simply add an argument to your method that has a default value of a `gr.Progress` instance. Then you can update the progress levels by calling this instance directly with a float between 0 and 1, or using the `tqdm()` method of the `Progress` instance to track progress over an iterable, as shown below. 
Queueing must be enabled for progress updates.\n\n```python\nimport gradio as gr\nimport time\n\ndef slowly_reverse(word, progress=gr.Progress()):\n progress(0, desc=\"Starting\")\n time.sleep(1)\n progress(0.05)\n new_string = \"\"\n for letter in progress.tqdm(word, desc=\"Reversing\"):\n time.sleep(0.25)\n new_string = letter + new_string\n return new_string\n\ndemo = gr.Interface(slowly_reverse, gr.Text(), gr.Text())\n\nif __name__ == \"__main__\":\n demo.queue(concurrency_count=10).launch()\n\n```\n\n\nIf you use the `tqdm` library, you can even report progress updates automatically from any `tqdm.tqdm` that already exists within your function by setting the default argument as `gr.Progress(track_tqdm=True)`!\n\n## Batch Functions\n\nGradio supports the ability to pass *batch* functions. Batch functions are just\nfunctions which take in a list of inputs and return a list of predictions.\n\nFor example, here is a batched function that takes in two lists of inputs (a list of\nwords and a list of ints), and returns a list of trimmed words as output:\n\n```py\nimport time\n\ndef trim_words(words, lens):\n trimmed_words = []\n time.sleep(5)\n for w, l in zip(words, lens):\n trimmed_words.append(w[:int(l)]) \n return [trimmed_words]\n```\n\nThe advantage of using batched functions is that if you enable queuing, the Gradio\nserver can automatically *batch* incoming requests and process them in parallel,\npotentially speeding up your demo. Here's what the Gradio code looks like (notice\nthe `batch=True` and `max_batch_size=16` -- both of these parameters can be passed\ninto event triggers or into the `Interface` class)\n\nWith `Interface`:\n\n```python\ndemo = gr.Interface(trim_words, [\"textbox\", \"number\"], [\"output\"], \n batch=True, max_batch_size=16)\ndemo.queue()\ndemo.launch()\n```\n\nWith `Blocks`:\n\n```py\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n with gr.Row():\n word = gr.Textbox(label=\"word\")\n leng = gr.Number(label=\"leng\")\n output = gr.Textbox(label=\"Output\")\n with gr.Row():\n run = gr.Button()\n\n event = run.click(trim_words, [word, leng], output, batch=True, max_batch_size=16)\n\ndemo.queue()\ndemo.launch()\n```\n\nIn the example above, 16 requests could be processed in parallel (for a total inference\ntime of 5 seconds), instead of each request being processed separately (for a total\ninference time of 80 seconds). Many Hugging Face `transformers` and `diffusers` models\nwork very naturally with Gradio's batch mode: here's [an example demo using diffusers to\ngenerate images in batches](https://github.com/gradio-app/gradio/blob/main/demo/diffusers_with_batching/run.py)\n\nNote: using batch functions with Gradio **requires** you to enable queuing in the underlying Interface or Blocks (see the queuing section above).\n\n\n## Colab Notebooks\n\n\nGradio is able to run anywhere you run Python, including local jupyter notebooks as well as collaborative notebooks, such as [Google Colab](https://colab.research.google.com/). In the case of local jupyter notebooks and Google Colab notbooks, Gradio runs on a local server which you can interact with in your browser. (Note: for Google Colab, this is accomplished by [service worker tunneling](https://github.com/tensorflow/tensorboard/blob/master/docs/design/colab_integration.md), which requires cookies to be enabled in your browser.) 
For other remote notebooks, Gradio will also run on a server, but you will need to use [SSH tunneling](https://coderwall.com/p/ohk6cg/remote-access-to-ipython-notebooks-via-ssh) to view the app in your local browser. Often a simpler option is to use Gradio's built-in public links, [discussed in the next Guide](https://gradio.app/guides/sharing-your-app/#sharing-demos). ", "html": "

Key Features

\n\n

Let's go through some of the most popular features of Gradio! Here are Gradio's key features:

\n\n
    \n
  1. Adding example inputs
  2. Passing custom error messages
  3. Adding descriptive content
  4. Setting up flagging
  5. Preprocessing and postprocessing
  6. Styling demos
  7. Queuing users
  8. Iterative outputs
  9. Progress bars
  10. Batch functions
  11. Running on collaborative notebooks
\n\n

Example Inputs

\n\n

You can provide example data that a user can easily load into Interface. This can be helpful to demonstrate the types of inputs the model expects, as well as to provide a way to explore your dataset in conjunction with your model. To load example data, you can provide a nested list to the examples= keyword argument of the Interface constructor. Each sublist within the outer list represents a data sample, and each element within the sublist represents an input for each input component. The format of example data for each component is specified in the Docs.

\n\n
import gradio as gr\n\ndef calculator(num1, operation, num2):\n    if operation == \"add\":\n        return num1 + num2\n    elif operation == \"subtract\":\n        return num1 - num2\n    elif operation == \"multiply\":\n        return num1 * num2\n    elif operation == \"divide\":\n        if num2 == 0:\n            raise gr.Error(\"Cannot divide by zero!\")\n        return num1 / num2\n\ndemo = gr.Interface(\n    calculator,\n    [\n        \"number\", \n        gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n        \"number\"\n    ],\n    \"number\",\n    examples=[\n        [5, \"add\", 3],\n        [4, \"divide\", 2],\n        [-4, \"multiply\", 2.5],\n        [0, \"subtract\", 1.2],\n    ],\n    title=\"Toy Calculator\",\n    description=\"Here's a sample toy calculator. Allows you to calculate things like $2+2=4$\",\n)\ndemo.launch()\n\n
\n\n

\n\n

You can load a large dataset into the examples to browse and interact with the dataset through Gradio. The examples will be automatically paginated (you can configure this through the examples_per_page argument of Interface).

\n\n

Continue learning about examples in the More On Examples guide.

\n\n

Alerts

\n\n

You wish to pass custom error messages to the user. To do so, raise a gr.Error(\"custom message\") to display an error message. If you try to divide by zero in the calculator demo above, a popup modal will display the custom error message. Learn more about Error in the docs.

\n\n

You can also issue gr.Warning(\"message\") and gr.Info(\"message\") by having them as standalone lines in your function, which will immediately display modals while continuing the execution of your function. Queueing needs to be enabled for this to work.

\n\n

Note below how the gr.Error has to be raised, while the gr.Warning and gr.Info are single lines.

\n\n
def start_process(name):\n    gr.Info(\"Starting process\")\n    if name is None:\n        gr.Warning(\"Name is empty\")\n    ...\n    if success == False:\n        raise gr.Error(\"Process failed\")\n
\n\n

Descriptive Content

\n\n

In the previous example, you may have noticed the title= and description= keyword arguments in the Interface constructor that help users understand your app.

\n\n

There are three arguments in the Interface constructor to specify where this content should go:

\n\n
    \n
  • title: which accepts text and can display it at the very top of interface, and also becomes the page title.
  • description: which accepts text, markdown or HTML and places it right under the title.
  • article: which also accepts text, markdown or HTML and places it below the interface.
\n\n

\"annotated\"

\n\n

If you're using the Blocks API instead, you can insert text, markdown, or HTML anywhere using the gr.Markdown(...) or gr.HTML(...) components, with descriptive content inside the Component constructor.

\n\n

Another useful keyword argument is label=, which is present in every Component. This modifies the label text at the top of each Component. You can also add the info= keyword argument to form elements like Textbox or Radio to provide further information on their usage.

\n\n
gr.Number(label='Age', info='In years, must be greater than 0')\n
\n\n

Flagging

\n\n

By default, an Interface will have a \"Flag\" button. When a user testing your Interface sees input with interesting output, such as erroneous or unexpected model behaviour, they can flag the input for you to review. Within the directory provided by the flagging_dir= argument to the Interface constructor, a CSV file will log the flagged inputs. If the interface involves file data, such as for Image and Audio components, folders will be created to store those flagged data as well.

\n\n

For example, with the calculator interface shown above, we would have the flagged data stored in the flagged directory shown below:

\n\n
+-- calculator.py\n+-- flagged/\n|   +-- logs.csv\n
\n\n

flagged/logs.csv

\n\n
num1,operation,num2,Output\n5,add,7,12\n6,subtract,1.5,4.5\n
\n\n

With the sepia interface shown earlier, we would have the flagged data stored in the flagged directory shown below:

\n\n
+-- sepia.py\n+-- flagged/\n|   +-- logs.csv\n|   +-- im/\n|   |   +-- 0.png\n|   |   +-- 1.png\n|   +-- Output/\n|   |   +-- 0.png\n|   |   +-- 1.png\n
\n\n

flagged/logs.csv

\n\n
im,Output\nim/0.png,Output/0.png\nim/1.png,Output/1.png\n
\n\n

If you wish for the user to provide a reason for flagging, you can pass a list of strings to the flagging_options argument of Interface. Users will have to select one of the strings when flagging, which will be saved as an additional column to the CSV.

\n\n

Preprocessing and Postprocessing

\n\n

\"\"

\n\n

As you've seen, Gradio includes components that can handle a variety of different data types, such as images, audio, and video. Most components can be used as both inputs and outputs.

\n\n

When a component is used as an input, Gradio automatically handles the preprocessing needed to convert the data from a type sent by the user's browser (such as a base64 representation of a webcam snapshot) to a form that can be accepted by your function (such as a numpy array).

\n\n

Similarly, when a component is used as an output, Gradio automatically handles the postprocessing needed to convert the data from what is returned by your function (such as a list of image paths) to a form that can be displayed in the user's browser (such as a Gallery of images in base64 format).

\n\n

You can control the preprocessing using the parameters when constructing the image component. For example, here if you instantiate the Image component with the following parameters, it will convert the image to the PIL type and reshape it to be (100, 100) no matter the original size that it was submitted as:

\n\n
img = gr.Image(shape=(100, 100), type=\"pil\")\n
\n\n

In contrast, here we keep the original size of the image, but invert the colors before converting it to a numpy array:

\n\n
img = gr.Image(invert_colors=True, type=\"numpy\")\n
\n\n

Postprocessing is a lot easier! Gradio automatically recognizes the format of the returned data (e.g. is the Image a numpy array or a str filepath?) and postprocesses it into a format that can be displayed by the browser.

\n\n

Take a look at the Docs to see all the preprocessing-related parameters for each Component.

\n\n

Styling

\n\n

Gradio themes are the easiest way to customize the look and feel of your app. You can choose from a variety of themes, or create your own. To do so, pass the theme= kwarg to the Interface constructor. For example:

\n\n
demo = gr.Interface(..., theme=gr.themes.Monochrome())\n
\n\n

Gradio comes with a set of prebuilt themes which you can load from gr.themes.*. You can extend these themes or create your own themes from scratch - see the Theming guide for more details.

\n\n

For additional styling ability, you can pass any CSS to your app using the css= kwarg.\nThe base class for the Gradio app is gradio-container, so here's an example that changes the background color of the Gradio app:

\n\n
with gr.Interface(css=\".gradio-container {background-color: red}\") as demo:\n    ...\n
\n\n

Some components can be additionally styled through the style() method. For example:

\n\n
img = gr.Image(\"lion.jpg\").style(height='24', rounded=False)\n
\n\n

Take a look at the Docs to see all the styling options for each Component.

\n\n

Queuing

\n\n

If your app expects heavy traffic, use the queue() method to control processing rate. This will queue up calls so only a certain number of requests are processed at a single time. Queueing uses websockets, which also prevent network timeouts, so you should use queueing if the inference time of your function is long (> 1min).

\n\n

With Interface:

\n\n
demo = gr.Interface(...).queue()\ndemo.launch()\n
\n\n

With Blocks:

\n\n
with gr.Blocks() as demo:\n    #...\ndemo.queue()\ndemo.launch()\n
\n\n

You can control the number of requests processed at a single time as such:

\n\n
demo.queue(concurrency_count=3)\n
\n\n

See the Docs on queueing for details on configuring other queuing parameters.

\n\n

To specify only certain functions for queueing in Blocks:

\n\n
with gr.Blocks() as demo2:\n    num1 = gr.Number()\n    num2 = gr.Number()\n    output = gr.Number()\n    gr.Button(\"Add\").click(\n        lambda a, b: a + b, [num1, num2], output)\n    gr.Button(\"Multiply\").click(\n        lambda a, b: a * b, [num1, num2], output, queue=True)\ndemo2.launch()\n
\n\n

Iterative Outputs

\n\n

In some cases, you may want to stream a sequence of outputs rather than show a single output at once. For example, you might have an image generation model and you want to show the image that is generated at each step, leading up to the final image. Or you might have a chatbot which streams its response one word at a time instead of returning it all at once.

\n\n

In such cases, you can supply a generator function into Gradio instead of a regular function. Creating generators in Python is very simple: instead of a single return value, a function should yield a series of values instead. Usually the yield statement is put in some kind of loop. Here's an example of a generator that simply counts up to a given number:

\n\n
def my_generator(x):\n    for i in range(x):\n        yield i\n
\n\n

You supply a generator into Gradio the same way as you would a regular function. For example, here's a (fake) image generation model that generates noise for several steps before outputting an image:

\n\n
import gradio as gr\nimport numpy as np\nimport time\n\n# define core fn, which returns a generator {steps} times before returning the image\ndef fake_diffusion(steps):\n    for _ in range(steps):\n        time.sleep(1)\n        image = np.random.random((600, 600, 3))\n        yield image\n    image = \"https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg\"\n    yield image\n\n\ndemo = gr.Interface(fake_diffusion, inputs=gr.Slider(1, 10, 3), outputs=\"image\")\n\n# define queue - required for generators\ndemo.queue()\n\ndemo.launch()\n\n
\n\n

\n\n

Note that we've added a time.sleep(1) in the iterator to create an artificial pause between steps so that you are able to observe the steps of the iterator (in a real image generation model, this probably wouldn't be necessary).

\n\n

Supplying a generator into Gradio requires you to enable queuing in the underlying Interface or Blocks (see the queuing section above).

\n\n

Progress Bars

\n\n

Gradio supports the ability to create custom Progress Bars so that you have customizability and control over the progress update that you show to the user. In order to enable this, simply add an argument to your method that has a default value of a gr.Progress instance. Then you can update the progress levels by calling this instance directly with a float between 0 and 1, or using the tqdm() method of the Progress instance to track progress over an iterable, as shown below. Queueing must be enabled for progress updates.

\n\n
import gradio as gr\nimport time\n\ndef slowly_reverse(word, progress=gr.Progress()):\n    progress(0, desc=\"Starting\")\n    time.sleep(1)\n    progress(0.05)\n    new_string = \"\"\n    for letter in progress.tqdm(word, desc=\"Reversing\"):\n        time.sleep(0.25)\n        new_string = letter + new_string\n    return new_string\n\ndemo = gr.Interface(slowly_reverse, gr.Text(), gr.Text())\n\nif __name__ == \"__main__\":\n    demo.queue(concurrency_count=10).launch()\n\n
\n\n

\n\n

If you use the tqdm library, you can even report progress updates automatically from any tqdm.tqdm that already exists within your function by setting the default argument as gr.Progress(track_tqdm=True)!

\n\n

Batch Functions

\n\n

Gradio supports the ability to pass batch functions. Batch functions are just\nfunctions which take in a list of inputs and return a list of predictions.

\n\n

For example, here is a batched function that takes in two lists of inputs (a list of\nwords and a list of ints), and returns a list of trimmed words as output:

\n\n
import time\n\ndef trim_words(words, lens):\n    trimmed_words = []\n    time.sleep(5)\n    for w, l in zip(words, lens):\n        trimmed_words.append(w[:int(l)])        \n    return [trimmed_words]\n
\n\n

The advantage of using batched functions is that if you enable queuing, the Gradio\nserver can automatically batch incoming requests and process them in parallel,\npotentially speeding up your demo. Here's what the Gradio code looks like (notice\nthe batch=True and max_batch_size=16 -- both of these parameters can be passed\ninto event triggers or into the Interface class)

\n\n

With Interface:

\n\n
demo = gr.Interface(trim_words, [\"textbox\", \"number\"], [\"output\"], \n                    batch=True, max_batch_size=16)\ndemo.queue()\ndemo.launch()\n
\n\n

With Blocks:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        word = gr.Textbox(label=\"word\")\n        leng = gr.Number(label=\"leng\")\n        output = gr.Textbox(label=\"Output\")\n    with gr.Row():\n        run = gr.Button()\n\n    event = run.click(trim_words, [word, leng], output, batch=True, max_batch_size=16)\n\ndemo.queue()\ndemo.launch()\n
\n\n

In the example above, 16 requests could be processed in parallel (for a total inference\ntime of 5 seconds), instead of each request being processed separately (for a total\ninference time of 80 seconds). Many Hugging Face transformers and diffusers models\nwork very naturally with Gradio's batch mode: here's an example of using diffusers to\ngenerate images in batches.

\n\n
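
As a rough sketch of what such a demo could look like (assuming the diffusers and torch packages are installed and a GPU is available; the checkpoint name below is only an illustration), the batched function receives a list of prompts and returns one list containing the generated images:

\n\n
import gradio as gr\nimport torch\nfrom diffusers import StableDiffusionPipeline\n\n# hypothetical checkpoint; any text-to-image diffusers pipeline works the same way\npipe = StableDiffusionPipeline.from_pretrained(\n    \"runwayml/stable-diffusion-v1-5\", torch_dtype=torch.float16\n).to(\"cuda\")\n\ndef generate(prompts):\n    # prompts is a list of strings; the pipeline produces one image per prompt\n    images = pipe(prompts).images\n    return [images]  # one list per output component\n\ndemo = gr.Interface(generate, \"textbox\", \"image\", batch=True, max_batch_size=4)\ndemo.queue()\ndemo.launch()\n
\n\n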

Note: using batch functions with Gradio requires you to enable queuing in the underlying Interface or Blocks (see the queuing section above).

\n\n

Colab Notebooks

\n\n

Gradio is able to run anywhere you run Python, including local Jupyter notebooks as well as collaborative notebooks, such as Google Colab. In the case of local Jupyter notebooks and Google Colab notebooks, Gradio runs on a local server which you can interact with in your browser. (Note: for Google Colab, this is accomplished by service worker tunneling, which requires cookies to be enabled in your browser.) For other remote notebooks, Gradio will also run on a server, but you will need to use SSH tunneling to view the app in your local browser. Often a simpler option is to use Gradio's built-in public links, discussed in the next Guide.

\n", "tags": [], "spaces": [], "url": "/guides/key-features/", "contributor": null}, {"name": "sharing-your-app", "category": "getting-started", "pretty_category": "Getting Started", "guide_index": 3, "absolute_index": 2, "pretty_name": "Sharing Your App", "content": "# Sharing Your App\n\nHow to share your Gradio app: \n\n1. [Sharing demos with the share parameter](#sharing-demos)\n2. [Hosting on HF Spaces](#hosting-on-hf-spaces)\n3. [Embedding hosted spaces](#embedding-hosted-spaces)\n4. [Embedding with web components](#embedding-with-web-components)\n5. [Using the API page](#api-page)\n6. [Adding authentication to the page](#authentication)\n7. [Accessing Network Requests](#accessing-the-network-request-directly)\n8. [Mounting within FastAPI](#mounting-within-another-fast-api-app)\n9. [Security](#security-and-file-access)\n\n## Sharing Demos\n\nGradio demos can be easily shared publicly by setting `share=True` in the `launch()` method. Like this:\n\n```python\ndemo.launch(share=True)\n```\n\nThis generates a public, shareable link that you can send to anybody! When you send this link, the user on the other side can try out the model in their browser. Because the processing happens on your device (as long as your device stays on!), you don't have to worry about any packaging any dependencies. A share link usually looks something like this: **XXXXX.gradio.app**. Although the link is served through a Gradio URL, we are only a proxy for your local server, and do not store any data sent through your app.\n\nKeep in mind, however, that these links are publicly accessible, meaning that anyone can use your model for prediction! Therefore, make sure not to expose any sensitive information through the functions you write, or allow any critical changes to occur on your device. If you set `share=False` (the default, except in colab notebooks), only a local link is created, which can be shared by [port-forwarding](https://www.ssh.com/ssh/tunneling/example) with specific users. \n\n![sharing](https://github.com/gradio-app/gradio/blob/main/guides/assets/sharing.svg?raw=true)\n\nShare links expire after 72 hours.\n\n## Hosting on HF Spaces\n\nIf you'd like to have a permanent link to your Gradio demo on the internet, use Hugging Face Spaces. [Hugging Face Spaces](http://huggingface.co/spaces/) provides the infrastructure to permanently host your machine learning model for free! \n\nAfter you have [created a free Hugging Face account](https://huggingface.co/join), you have three methods to deploy your Gradio app to Hugging Face Spaces:\n\n1. From terminal: run `gradio deploy` in your app directory. The CLI will gather some basic metadata and then launch your app. To update your space, you can re-run this command or enable the Github Actions option to automatically update the Spaces on `git push`.\n\n2. From your browser: Drag and drop a folder containing your Gradio model and all related files [here](https://huggingface.co/new-space).\n\n3. Connect Spaces with your Git repository and Spaces will pull the Gradio app from there. See [this guide how to host on Hugging Face Spaces](https://huggingface.co/blog/gradio-spaces) for more information. \n\n\n\nNote: Some components, like `gr.Image`, will display a \"Share\" button only on Spaces, so that users can share the generated output to the Discussions page of the Space easily. You can disable this with `show_share_button`, such as `gr.Image(show_share_button=False)`. 
\n\n![Image with show_share_button=True](https://github.com/gradio-app/gradio/blob/main/guides/assets/share_icon.png?raw=true)\n\n## Embedding Hosted Spaces\n\nOnce you have hosted your app on Hugging Face Spaces (or on your own server), you may want to embed the demo on a different website, such as your blog or your portfolio. Embedding an interactive demo allows people to try out the machine learning model that you have built, without needing to download or install anything \u2014 right in their browser! The best part is that you can embed interactive demos even in static websites, such as GitHub pages.\n\nThere are two ways to embed your Gradio demos. You can find quick links to both options directly on the Hugging Face Space page, in the \"Embed this Space\" dropdown option:\n\n![Embed this Space dropdown option](https://github.com/gradio-app/gradio/blob/main/guides/assets/embed_this_space.png?raw=true)\n\n### Embedding with Web Components\n\nWeb components typically offer a better experience to users than IFrames. Web components load lazily, meaning that they won't slow down the loading time of your website, and they automatically adjust their height based on the size of the Gradio app. \n\nTo embed with Web Components:\n\n1. Import the gradio JS library into into your site by adding the script below in your site (replace {GRADIO_VERSION} in the URL with the library version of Gradio you are using). \n\n```html\n\n```\n\n2. Add \n```html\n\n```\n\nelement where you want to place the app. Set the `src=` attribute to your Space's embed URL, which you can find in the \"Embed this Space\" button. For example:\n\n\n```html\n\n```\n\n\n\nYou can see examples of how web components look on the Gradio landing page.\n\nYou can also customize the appearance and behavior of your web component with attributes that you pass into the `` tag:\n\n* `src`: as we've seen, the `src` attributes links to the URL of the hosted Gradio demo that you would like to embed\n* `space`: an optional shorthand if your Gradio demo is hosted on Hugging Face Space. Accepts a `username/space_name` instead of a full URL. Example: `gradio/Echocardiogram-Segmentation`. If this attribute attribute is provided, then `src` does not need to be provided.\n* `control_page_title`: a boolean designating whether the html title of the page should be set to the title of the Gradio app (by default `\"false\"`)\n* `initial_height`: the initial height of the web component while it is loading the Gradio app, (by default `\"300px\"`). Note that the final height is set based on the size of the Gradio app.\n* `container`: whether to show the border frame and information about where the Space is hosted (by default `\"true\"`)\n* `info`: whether to show just the information about where the Space is hosted underneath the embedded app (by default `\"true\"`)\n* `autoscroll`: whether to autoscroll to the output when prediction has finished (by default `\"false\"`)\n* `eager`: whether to load the Gradio app as soon as the page loads (by default `\"false\"`)\n* `theme_mode`: whether to use the `dark`, `light`, or default `system` theme mode (by default `\"system\"`)\n\nHere's an example of how to use these attributes to create a Gradio app that does not lazy load and has an initial height of 0px. \n\n```html\n\n```\n\n_Note: While Gradio's CSS will never impact the embedding page, the embedding page can affect the style of the embedded Gradio app. 
Make sure that any CSS in the parent page isn't so general that it could also apply to the embedded Gradio app and cause the styling to break. Element selectors such as `header { ... }` and `footer { ... }` will be the most likely to cause issues._\n\n### Embedding with IFrames\n\nTo embed with IFrames instead (if you cannot add javascript to your website, for example), add this element:\n\n```html\n\n```\n\nAgain, you can find the `src=` attribute to your Space's embed URL, which you can find in the \"Embed this Space\" button.\n\nNote: if you use IFrames, you'll probably want to add a fixed `height` attribute and set `style=\"border:0;\"` to remove the boreder. In addition, if your app requires permissions such as access to the webcam or the microphone, you'll need to provide that as well using the `allow` attribute.\n\n## API Page\n\nYou can use almost any Gradio app as an API! In the footer of a Gradio app [like this one](https://huggingface.co/spaces/gradio/hello_world), you'll see a \"Use via API\" link. \n\n![Use via API](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/gradio-guides/api3.gif)\n\nThis is a page that lists the endpoints that can be used to query the Gradio app, via our supported clients: either [the Python client](https://gradio.app/guides/getting-started-with-the-python-client/), or [the JavaScript client](https://gradio.app/guides/getting-started-with-the-js-client/). For each endpoint, Gradio automatically generates the parameters and their types, as well as example inputs.\n\nThe endpoints are automatically created when you launch a Gradio `Interface`. If you are using Gradio `Blocks`, you can also set up a Gradio API page, though we recommend that you explicitly name each event listener, such as\n\n```python\nbtn.click(add, [num1, num2], output, api_name=\"addition\")\n```\n\nThis will add and document the endpoint `/api/addition/` to the automatically generated API page. Otherwise, your API endpoints will appear as \"unnamed\" endpoints. \n\n*Note*: For Gradio apps in which [queueing is enabled](https://gradio.app/guides/key-features#queuing), users can bypass the queue if they make a POST request to your API endpoint. To disable this behavior, set `api_open=False` in the `queue()` method. To disable the API page altogether, set `show_api=False` in `.launch()`.\n\n## Authentication\n\nYou may wish to put an authentication page in front of your app to limit who can open your app. With the `auth=` keyword argument in the `launch()` method, you can provide a tuple with a username and password, or a list of acceptable username/password tuples; Here's an example that provides password-based authentication for a single user named \"admin\":\n\n```python\ndemo.launch(auth=(\"admin\", \"pass1234\"))\n```\n\nFor more complex authentication handling, you can even pass a function that takes a username and password as arguments, and returns True to allow authentication, False otherwise. 
This can be used for, among other things, making requests to 3rd-party authentication services.\n\nHere's an example of a function that accepts any login where the username and password are the same:\n\n```python\ndef same_auth(username, password):\n return username == password\ndemo.launch(auth=same_auth)\n```\n\nFor authentication to work properly, third party cookies must be enabled in your browser.\nThis is not the case by default for Safari, Chrome Incognito Mode.\n\n## Accessing the Network Request Directly\n\nWhen a user makes a prediction to your app, you may need the underlying network request, in order to get the request headers (e.g. for advanced authentication), log the client's IP address, or for other reasons. Gradio supports this in a similar manner to FastAPI: simply add a function parameter whose type hint is `gr.Request` and Gradio will pass in the network request as that parameter. Here is an example:\n\n```python\nimport gradio as gr\n\ndef echo(name, request: gr.Request):\n if request:\n print(\"Request headers dictionary:\", request.headers)\n print(\"IP address:\", request.client.host)\n return name\n\nio = gr.Interface(echo, \"textbox\", \"textbox\").launch()\n```\n\nNote: if your function is called directly instead of through the UI (this happens, for \nexample, when examples are cached), then `request` will be `None`. You should handle\nthis case explicitly to ensure that your app does not throw any errors. That is why\nwe have the explicit check `if request`.\n\n## Mounting Within Another FastAPI App\n\nIn some cases, you might have an existing FastAPI app, and you'd like to add a path for a Gradio demo.\nYou can easily do this with `gradio.mount_gradio_app()`.\n\nHere's a complete example:\n\n```python\nfrom fastapi import FastAPI\nimport gradio as gr\n\nCUSTOM_PATH = \"/gradio\"\n\napp = FastAPI()\n\n\n@app.get(\"/\")\ndef read_main():\n return {\"message\": \"This is your main app\"}\n\n\nio = gr.Interface(lambda x: \"Hello, \" + x + \"!\", \"textbox\", \"textbox\")\napp = gr.mount_gradio_app(app, io, path=CUSTOM_PATH)\n\n\n# Run this from the terminal as you would normally start a FastAPI app: `uvicorn run:app`\n# and navigate to http://localhost:8000/gradio in your browser.\n\n```\n\nNote that this approach also allows you run your Gradio apps on custom paths (`http://localhost:8000/gradio` in the example above).\n\n## Security and File Access\n\nSharing your Gradio app with others (by hosting it on Spaces, on your own server, or through temporary share links) **exposes** certain files on the host machine to users of your Gradio app. \n\nIn particular, Gradio apps ALLOW users to access to three kinds of files:\n\n* **Files in the same directory (or a subdirectory) of where the Gradio script is launched from.** For example, if the path to your gradio scripts is `/home/usr/scripts/project/app.py` and you launch it from `/home/usr/scripts/project/`, then users of your shared Gradio app will be able to access any files inside `/home/usr/scripts/project/`. This is done so that you can easily reference these files in your Gradio app (e.g. for your app's `examples`).\n\n* **Temporary files created by Gradio.** These are files that are created by Gradio as part of running your prediction function. For example, if your prediction function returns a video file, then Gradio will save that video to a temporary file and then send the path to the temporary file to the front end. 
You can customize the location of temporary files created by Gradio by setting the environment variable `GRADIO_TEMP_DIR` to an absolute path, such as `/home/usr/scripts/project/temp/`.\n\n* **Files that you explicitly allow via the `allowed_paths` parameter in `launch()`**. This parameter allows you to pass in a list of additional directories or exact filepaths you'd like to allow users to have access to. (By default, this parameter is an empty list).\n\nGradio DOES NOT ALLOW access to:\n\n* **Dotfiles** (any files whose name begins with `'.'`) or any files that are contained in any directory whose name begins with `'.'`\n\n* **Files that you explicitly allow via the `blocked_paths` parameter in `launch()`**. You can pass in a list of additional directories or exact filepaths to the `blocked_paths` parameter in `launch()`. This parameter takes precedence over the files that Gradio exposes by default or by the `allowed_paths`.\n\n* **Any other paths on the host machine**. Users should NOT be able to access other arbitrary paths on the host. \n\nPlease make sure you are running the latest version of `gradio` for these security settings to apply. ", "html": "

Sharing Your App

\n\n

How to share your Gradio app:

\n\n
    \n
  1. Sharing demos with the share parameter
  2. \n
  3. Hosting on HF Spaces
  4. \n
  5. Embedding hosted spaces
  6. \n
  7. Embedding with web components
  8. \n
  9. Using the API page
  10. \n
  11. Adding authentication to the page
  12. \n
  13. Accessing Network Requests
  14. \n
  15. Mounting within FastAPI
  16. \n
  17. Security
  18. \n
\n\n

Sharing Demos

\n\n

Gradio demos can be easily shared publicly by setting share=True in the launch() method. Like this:

\n\n
demo.launch(share=True)\n
\n\n

This generates a public, shareable link that you can send to anybody! When you send this link, the user on the other side can try out the model in their browser. Because the processing happens on your device (as long as your device stays on!), you don't have to worry about packaging any dependencies. A share link usually looks something like this: XXXXX.gradio.app. Although the link is served through a Gradio URL, we are only a proxy for your local server, and do not store any data sent through your app.

\n\n

Keep in mind, however, that these links are publicly accessible, meaning that anyone can use your model for prediction! Therefore, make sure not to expose any sensitive information through the functions you write, or allow any critical changes to occur on your device. If you set share=False (the default, except in colab notebooks), only a local link is created, which can be shared by port-forwarding with specific users.

\n\n

\"sharing\"

\n\n

Share links expire after 72 hours.

\n\n

Hosting on HF Spaces

\n\n

If you'd like to have a permanent link to your Gradio demo on the internet, use Hugging Face Spaces. Hugging Face Spaces provides the infrastructure to permanently host your machine learning model for free!

\n\n

After you have created a free Hugging Face account, you have three methods to deploy your Gradio app to Hugging Face Spaces:

\n\n
    \n
  1. From terminal: run gradio deploy in your app directory. The CLI will gather some basic metadata and then launch your app. To update your space, you can re-run this command or enable the GitHub Actions option to automatically update the Space on git push.

  2. \n
  3. From your browser: Drag and drop a folder containing your Gradio model and all related files here.

  4. \n
  5. Connect Spaces with your Git repository and Spaces will pull the Gradio app from there. See this guide on how to host on Hugging Face Spaces for more information.

  6. \n
\n\n

\n\n

Note: Some components, like gr.Image, will display a \"Share\" button only on Spaces, so that users can share the generated output to the Discussions page of the Space easily. You can disable this with show_share_button, such as gr.Image(show_share_button=False).

\n\n

\"Imagesharebutton=True\" />

\n\n

Embedding Hosted Spaces

\n\n

Once you have hosted your app on Hugging Face Spaces (or on your own server), you may want to embed the demo on a different website, such as your blog or your portfolio. Embedding an interactive demo allows people to try out the machine learning model that you have built, without needing to download or install anything \u2014 right in their browser! The best part is that you can embed interactive demos even in static websites, such as GitHub pages.

\n\n

There are two ways to embed your Gradio demos. You can find quick links to both options directly on the Hugging Face Space page, in the \"Embed this Space\" dropdown option:

\n\n

\"Embed

\n\n

Embedding with Web Components

\n\n

Web components typically offer a better experience to users than IFrames. Web components load lazily, meaning that they won't slow down the loading time of your website, and they automatically adjust their height based on the size of the Gradio app.

\n\n

To embed with Web Components:

\n\n
    \n
  1. Import the gradio JS library into your site by adding the script below (replace {GRADIO_VERSION} in the URL with the library version of Gradio you are using).
  2. \n
\n\n
\n
\n\n
    \n
  1. Add
  2. \n
\n\n
\n
\n\n

element where you want to place the app. Set the src= attribute to your Space's embed URL, which you can find in the \"Embed this Space\" button. For example:

\n\n
\n
\n\n\n\n

You can see examples of how web components look on the Gradio landing page.

\n\n

You can also customize the appearance and behavior of your web component with attributes that you pass into the <gradio-app> tag:

\n\n
    \n
  • src: as we've seen, the src attribute links to the URL of the hosted Gradio demo that you would like to embed
  • \n
  • space: an optional shorthand if your Gradio demo is hosted on Hugging Face Spaces. Accepts a username/space_name instead of a full URL. Example: gradio/Echocardiogram-Segmentation. If this attribute is provided, then src does not need to be provided.
  • \n
  • control_page_title: a boolean designating whether the html title of the page should be set to the title of the Gradio app (by default \"false\")
  • \n
  • initial_height: the initial height of the web component while it is loading the Gradio app (by default \"300px\"). Note that the final height is set based on the size of the Gradio app.
  • \n
  • container: whether to show the border frame and information about where the Space is hosted (by default \"true\")
  • \n
  • info: whether to show just the information about where the Space is hosted underneath the embedded app (by default \"true\")
  • \n
  • autoscroll: whether to autoscroll to the output when prediction has finished (by default \"false\")
  • \n
  • eager: whether to load the Gradio app as soon as the page loads (by default \"false\")
  • \n
  • theme_mode: whether to use the dark, light, or default system theme mode (by default \"system\")
  • \n
\n\n

Here's an example of how to use these attributes to create a Gradio app that does not lazy load and has an initial height of 0px.

\n\n
\n
\n\n

Note: While Gradio's CSS will never impact the embedding page, the embedding page can affect the style of the embedded Gradio app. Make sure that any CSS in the parent page isn't so general that it could also apply to the embedded Gradio app and cause the styling to break. Element selectors such as header { ... } and footer { ... } will be the most likely to cause issues.

\n\n

Embedding with IFrames

\n\n

To embed with IFrames instead (if you cannot add javascript to your website, for example), add this element:

\n\n
\n
\n\n

Again, you can find the src= attribute to your Space's embed URL, which you can find in the \"Embed this Space\" button.

\n\n

Note: if you use IFrames, you'll probably want to add a fixed height attribute and set style=\"border:0;\" to remove the border. In addition, if your app requires permissions such as access to the webcam or the microphone, you'll need to provide that as well using the allow attribute.

\n\n

API Page

\n\n

You can use almost any Gradio app as an API! In the footer of a Gradio app like this one, you'll see a \"Use via API\" link.

\n\n

\"Use

\n\n

This is a page that lists the endpoints that can be used to query the Gradio app, via our supported clients: either the Python client, or the JavaScript client. For each endpoint, Gradio automatically generates the parameters and their types, as well as example inputs.

\n\n

The endpoints are automatically created when you launch a Gradio Interface. If you are using Gradio Blocks, you can also set up a Gradio API page, though we recommend that you explicitly name each event listener, such as

\n\n
btn.click(add, [num1, num2], output, api_name=\"addition\")\n
\n\n

This will add and document the endpoint /api/addition/ to the automatically generated API page. Otherwise, your API endpoints will appear as \"unnamed\" endpoints.

\n\n
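
For example, here is a minimal sketch of querying that named endpoint with the Python client (the URL below is a placeholder for wherever your app is hosted):

\n\n
from gradio_client import Client\n\nclient = Client(\"http://localhost:7860/\")  # placeholder URL\nresult = client.predict(3, 5, api_name=\"/addition\")\nprint(result)\n
\n\n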

Note: For Gradio apps in which queueing is enabled, users can bypass the queue if they make a POST request to your API endpoint. To disable this behavior, set api_open=False in the queue() method. To disable the API page altogether, set show_api=False in .launch().

\n\n
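
A minimal sketch of both settings together (assuming demo is an existing Interface or Blocks instance):

\n\n
demo.queue(api_open=False)   # API requests must go through the queue\ndemo.launch(show_api=False)  # hide the \"Use via API\" page entirely\n
\n\n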

Authentication

\n\n

You may wish to put an authentication page in front of your app to limit who can open it. With the auth= keyword argument in the launch() method, you can provide a tuple with a username and password, or a list of acceptable username/password tuples. Here's an example that provides password-based authentication for a single user named \"admin\":

\n\n
demo.launch(auth=(\"admin\", \"pass1234\"))\n
\n\n

For more complex authentication handling, you can even pass a function that takes a username and password as arguments, and returns True to allow authentication, False otherwise. This can be used for, among other things, making requests to 3rd-party authentication services.

\n\n

Here's an example of a function that accepts any login where the username and password are the same:

\n\n
def same_auth(username, password):\n    return username == password\ndemo.launch(auth=same_auth)\n
\n\n

For authentication to work properly, third-party cookies must be enabled in your browser.\nThis is not the case by default for Safari or for Chrome in Incognito Mode.

\n\n

Accessing the Network Request Directly

\n\n

When a user makes a prediction to your app, you may need the underlying network request, in order to get the request headers (e.g. for advanced authentication), log the client's IP address, or for other reasons. Gradio supports this in a similar manner to FastAPI: simply add a function parameter whose type hint is gr.Request and Gradio will pass in the network request as that parameter. Here is an example:

\n\n
import gradio as gr\n\ndef echo(name, request: gr.Request):\n    if request:\n        print(\"Request headers dictionary:\", request.headers)\n        print(\"IP address:\", request.client.host)\n    return name\n\nio = gr.Interface(echo, \"textbox\", \"textbox\").launch()\n
\n\n

Note: if your function is called directly instead of through the UI (this happens, for \nexample, when examples are cached), then request will be None. You should handle\nthis case explicitly to ensure that your app does not throw any errors. That is why\nwe have the explicit check if request.

\n\n

Mounting Within Another FastAPI App

\n\n

In some cases, you might have an existing FastAPI app, and you'd like to add a path for a Gradio demo.\nYou can easily do this with gradio.mount_gradio_app().

\n\n

Here's a complete example:

\n\n
from fastapi import FastAPI\nimport gradio as gr\n\nCUSTOM_PATH = \"/gradio\"\n\napp = FastAPI()\n\n\n@app.get(\"/\")\ndef read_main():\n    return {\"message\": \"This is your main app\"}\n\n\nio = gr.Interface(lambda x: \"Hello, \" + x + \"!\", \"textbox\", \"textbox\")\napp = gr.mount_gradio_app(app, io, path=CUSTOM_PATH)\n\n\n# Run this from the terminal as you would normally start a FastAPI app: `uvicorn run:app`\n# and navigate to http://localhost:8000/gradio in your browser.\n\n
\n\n

Note that this approach also allows you to run your Gradio apps on custom paths (http://localhost:8000/gradio in the example above).

\n\n

Security and File Access

\n\n

Sharing your Gradio app with others (by hosting it on Spaces, on your own server, or through temporary share links) exposes certain files on the host machine to users of your Gradio app.

\n\n

In particular, Gradio apps ALLOW users to access three kinds of files:

\n\n
    \n
  • Files in the same directory (or a subdirectory) of where the Gradio script is launched from. For example, if the path to your Gradio script is /home/usr/scripts/project/app.py and you launch it from /home/usr/scripts/project/, then users of your shared Gradio app will be able to access any files inside /home/usr/scripts/project/. This is done so that you can easily reference these files in your Gradio app (e.g. for your app's examples).

  • \n
  • Temporary files created by Gradio. These are files that are created by Gradio as part of running your prediction function. For example, if your prediction function returns a video file, then Gradio will save that video to a temporary file and then send the path to the temporary file to the front end. You can customize the location of temporary files created by Gradio by setting the environment variable GRADIO_TEMP_DIR to an absolute path, such as /home/usr/scripts/project/temp/.

  • \n
  • Files that you explicitly allow via the allowed_paths parameter in launch(). This parameter allows you to pass in a list of additional directories or exact filepaths you'd like to allow users to have access to. (By default, this parameter is an empty list).

  • \n
\n\n

Gradio DOES NOT ALLOW access to:

\n\n
    \n
  • Dotfiles (any files whose name begins with '.') or any files that are contained in any directory whose name begins with '.'

  • \n
  • Files that you explicitly block via the blocked_paths parameter in launch(). You can pass in a list of directories or exact filepaths to the blocked_paths parameter in launch(). This parameter takes precedence over the files that Gradio exposes by default or via the allowed_paths parameter.

  • \n
  • Any other paths on the host machine. Users should NOT be able to access other arbitrary paths on the host.

  • \n
\n\n
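
As a minimal sketch (the paths below are hypothetical), both parameters are passed directly to launch():

\n\n
demo.launch(\n    allowed_paths=[\"/home/usr/scripts/project/assets\"],        # extra directory to expose\n    blocked_paths=[\"/home/usr/scripts/project/secrets.json\"],  # path to always block\n)\n
\n\n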

Please make sure you are running the latest version of gradio for these security settings to apply.

\n", "tags": [], "spaces": [], "url": "/guides/sharing-your-app/", "contributor": null}, {"name": "interface-state", "category": "building-interfaces", "pretty_category": "Building Interfaces", "guide_index": 1, "absolute_index": 3, "pretty_name": "Interface State", "content": "# Interface State\n\nThis guide covers how State is handled in Gradio. Learn the difference between Global and Session states, and how to use both.\n\n## Global State\n\nYour function may use data that persists beyond a single function call. If the data is something accessible to all function calls and all users, you can create a variable outside the function call and access it inside the function. For example, you may load a large model outside the function and use it inside the function so that every function call does not need to reload the model. \n\n```python\nimport gradio as gr\n\nscores = []\n\ndef track_score(score):\n scores.append(score)\n top_scores = sorted(scores, reverse=True)[:3]\n return top_scores\n\ndemo = gr.Interface(\n track_score, \n gr.Number(label=\"Score\"), \n gr.JSON(label=\"Top Scores\")\n)\ndemo.launch()\n```\n\nIn the code above, the `scores` array is shared between all users. If multiple users are accessing this demo, their scores will all be added to the same list, and the returned top 3 scores will be collected from this shared reference. \n\n## Session State\n\nAnother type of data persistence Gradio supports is session **state**, where data persists across multiple submits within a page session. However, data is *not* shared between different users of your model. To store data in a session state, you need to do three things:\n\n1. Pass in an extra parameter into your function, which represents the state of the interface.\n2. At the end of the function, return the updated value of the state as an extra return value.\n3. Add the `'state'` input and `'state'` output components when creating your `Interface`\n\nA chatbot is an example where you would need session state - you want access to a users previous submissions, but you cannot store chat history in a global variable, because then chat history would get jumbled between different users. 
\n\n```python\nimport gradio as gr\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\nimport torch\n\ntokenizer = AutoTokenizer.from_pretrained(\"microsoft/DialoGPT-medium\")\nmodel = AutoModelForCausalLM.from_pretrained(\"microsoft/DialoGPT-medium\")\n\n\ndef user(message, history):\n return \"\", history + [[message, None]]\n\n\ndef bot(history):\n user_message = history[-1][0]\n new_user_input_ids = tokenizer.encode(\n user_message + tokenizer.eos_token, return_tensors=\"pt\"\n )\n\n # append the new user input tokens to the chat history\n bot_input_ids = torch.cat([torch.LongTensor([]), new_user_input_ids], dim=-1)\n\n # generate a response\n response = model.generate(\n bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id\n ).tolist()\n\n # convert the tokens to text, and then split the responses into lines\n response = tokenizer.decode(response[0]).split(\"<|endoftext|>\")\n response = [\n (response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)\n ] # convert to tuples of list\n history[-1] = response[0]\n return history\n\n\nwith gr.Blocks() as demo:\n chatbot = gr.Chatbot()\n msg = gr.Textbox()\n clear = gr.Button(\"Clear\")\n\n msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(\n bot, chatbot, chatbot\n )\n clear.click(lambda: None, None, chatbot, queue=False)\n\ndemo.launch()\n\n```\n\n\nNotice how the state persists across submits within each page, but if you load this demo in another tab (or refresh the page), the demos will not share chat history. \n\nThe default value of `state` is None. If you pass a default value to the state parameter of the function, it is used as the default value of the state instead. The `Interface` class only supports a single input and outputs state variable, though it can be a list with multiple elements. For more complex use cases, you can use Blocks, [which supports multiple `State` variables](/guides/state-in-blocks/).", "html": "

Interface State

\n\n

This guide covers how State is handled in Gradio. Learn the difference between Global and Session states, and how to use both.

\n\n

Global State

\n\n

Your function may use data that persists beyond a single function call. If the data is something accessible to all function calls and all users, you can create a variable outside the function call and access it inside the function. For example, you may load a large model outside the function and use it inside the function so that every function call does not need to reload the model.

\n\n
import gradio as gr\n\nscores = []\n\ndef track_score(score):\n    scores.append(score)\n    top_scores = sorted(scores, reverse=True)[:3]\n    return top_scores\n\ndemo = gr.Interface(\n    track_score, \n    gr.Number(label=\"Score\"), \n    gr.JSON(label=\"Top Scores\")\n)\ndemo.launch()\n
\n\n

In the code above, the scores array is shared between all users. If multiple users are accessing this demo, their scores will all be added to the same list, and the returned top 3 scores will be collected from this shared reference.

\n\n

Session State

\n\n

Another type of data persistence Gradio supports is session state, where data persists across multiple submits within a page session. However, data is not shared between different users of your model. To store data in a session state, you need to do three things:

\n\n
    \n
  1. Pass in an extra parameter into your function, which represents the state of the interface.
  2. \n
  3. At the end of the function, return the updated value of the state as an extra return value.
  4. \n
  5. Add the 'state' input and 'state' output components when creating your Interface
  6. \n
\n\n
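
Putting the three steps together, here is a minimal sketch (a made-up example that simply accumulates submitted messages) using the 'state' shorthand with an Interface:

\n\n
import gradio as gr\n\ndef store_message(message, history):\n    history = history or []  # the state is None on the first call\n    history.append(message)\n    # the first value fills the \"json\" output, the second becomes the new state\n    return history, history\n\ndemo = gr.Interface(\n    store_message,\n    [\"textbox\", \"state\"],\n    [\"json\", \"state\"],\n)\ndemo.launch()\n
\n\n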

A chatbot is an example where you would need session state - you want access to a user's previous submissions, but you cannot store chat history in a global variable, because then chat history would get jumbled between different users.

\n\n
import gradio as gr\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\nimport torch\n\ntokenizer = AutoTokenizer.from_pretrained(\"microsoft/DialoGPT-medium\")\nmodel = AutoModelForCausalLM.from_pretrained(\"microsoft/DialoGPT-medium\")\n\n\ndef user(message, history):\n    return \"\", history + [[message, None]]\n\n\ndef bot(history):\n    user_message = history[-1][0]\n    new_user_input_ids = tokenizer.encode(\n        user_message + tokenizer.eos_token, return_tensors=\"pt\"\n    )\n\n    # append the new user input tokens to the chat history\n    bot_input_ids = torch.cat([torch.LongTensor([]), new_user_input_ids], dim=-1)\n\n    # generate a response\n    response = model.generate(\n        bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id\n    ).tolist()\n\n    # convert the tokens to text, and then split the responses into lines\n    response = tokenizer.decode(response[0]).split(\"<|endoftext|>\")\n    response = [\n        (response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)\n    ]  # convert to tuples of list\n    history[-1] = response[0]\n    return history\n\n\nwith gr.Blocks() as demo:\n    chatbot = gr.Chatbot()\n    msg = gr.Textbox()\n    clear = gr.Button(\"Clear\")\n\n    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(\n        bot, chatbot, chatbot\n    )\n    clear.click(lambda: None, None, chatbot, queue=False)\n\ndemo.launch()\n\n
\n\n

\n\n

Notice how the state persists across submits within each page, but if you load this demo in another tab (or refresh the page), the demos will not share chat history.

\n\n

The default value of state is None. If you pass a default value to the state parameter of the function, it is used as the default value of the state instead. The Interface class only supports a single input and output state variable, though it can be a list with multiple elements. For more complex use cases, you can use Blocks, which supports multiple State variables.

\n", "tags": [], "spaces": [], "url": "/guides/interface-state/", "contributor": null}, {"name": "reactive-interfaces", "category": "building-interfaces", "pretty_category": "Building Interfaces", "guide_index": 2, "absolute_index": 4, "pretty_name": "Reactive Interfaces", "content": "# Reactive Interfaces\n\nThis guide covers how to get Gradio interfaces to refresh automatically or continuously stream data.\n\n## Live Interfaces\n\nYou can make interfaces automatically refresh by setting `live=True` in the interface. Now the interface will recalculate as soon as the user input changes.\n\n```python\nimport gradio as gr\n\ndef calculator(num1, operation, num2):\n if operation == \"add\":\n return num1 + num2\n elif operation == \"subtract\":\n return num1 - num2\n elif operation == \"multiply\":\n return num1 * num2\n elif operation == \"divide\":\n return num1 / num2\n\ndemo = gr.Interface(\n calculator,\n [\n \"number\",\n gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n \"number\"\n ],\n \"number\",\n live=True,\n)\ndemo.launch()\n\n```\n\n\nNote there is no submit button, because the interface resubmits automatically on change.\n\n## Streaming Components\n\nSome components have a \"streaming\" mode, such as `Audio` component in microphone mode, or the `Image` component in webcam mode. Streaming means data is sent continuously to the backend and the `Interface` function is continuously being rerun. \n\nThe difference between `gr.Audio(source='microphone')` and `gr.Audio(source='microphone', streaming=True)`, when both are used in `gr.Interface(live=True)`, is that the first `Component` will automatically submit data and run the `Interface` function when the user stops recording, whereas the second `Component` will continuously send data and run the `Interface` function *during* recording.\n\nHere is example code of streaming images from the webcam.\n\n```python\nimport gradio as gr\nimport numpy as np\n\ndef flip(im):\n return np.flipud(im)\n\ndemo = gr.Interface(\n flip, \n gr.Image(source=\"webcam\", streaming=True), \n \"image\",\n live=True\n)\ndemo.launch()\n \n```", "html": "

Reactive Interfaces

\n\n

This guide covers how to get Gradio interfaces to refresh automatically or continuously stream data.

\n\n

Live Interfaces

\n\n

You can make interfaces automatically refresh by setting live=True in the interface. Now the interface will recalculate as soon as the user input changes.

\n\n
import gradio as gr\n\ndef calculator(num1, operation, num2):\n    if operation == \"add\":\n        return num1 + num2\n    elif operation == \"subtract\":\n        return num1 - num2\n    elif operation == \"multiply\":\n        return num1 * num2\n    elif operation == \"divide\":\n        return num1 / num2\n\ndemo = gr.Interface(\n    calculator,\n    [\n        \"number\",\n        gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n        \"number\"\n    ],\n    \"number\",\n    live=True,\n)\ndemo.launch()\n\n
\n\n

\n\n

Note there is no submit button, because the interface resubmits automatically on change.

\n\n

Streaming Components

\n\n

Some components have a \"streaming\" mode, such as Audio component in microphone mode, or the Image component in webcam mode. Streaming means data is sent continuously to the backend and the Interface function is continuously being rerun.

\n\n

The difference between gr.Audio(source='microphone') and gr.Audio(source='microphone', streaming=True), when both are used in gr.Interface(live=True), is that the first Component will automatically submit data and run the Interface function when the user stops recording, whereas the second Component will continuously send data and run the Interface function during recording.

\n\n

Here is example code of streaming images from the webcam.

\n\n
import gradio as gr\nimport numpy as np\n\ndef flip(im):\n    return np.flipud(im)\n\ndemo = gr.Interface(\n    flip, \n    gr.Image(source=\"webcam\", streaming=True), \n    \"image\",\n    live=True\n)\ndemo.launch()\n\n
\n", "tags": [], "spaces": [], "url": "/guides/reactive-interfaces/", "contributor": null}, {"name": "advanced-interface-features", "category": "building-interfaces", "pretty_category": "Building Interfaces", "guide_index": 4, "absolute_index": 6, "pretty_name": "Advanced Interface Features", "content": "# Advanced Interface Features\n\nThere's more to cover on the [Interface](https://gradio.app/docs#interface) class. This guide covers all the advanced features: Using [Interpretation](https://gradio.app/docs#interpretation), custom styling, loading from the [Hugging Face Hub](https://hf.co), and using [Parallel](https://gradio.app/docs#parallel) and [Series](https://gradio.app/docs#series). \n\n## Interpreting your Predictions\n\nMost models are black boxes such that the internal logic of the function is hidden from the end user. To encourage transparency, we've made it very easy to add interpretation to your model by simply setting the `interpretation` keyword in the `Interface` class to `default`. This allows your users to understand what parts of the input are responsible for the output. Take a look at the simple interface below which shows an image classifier that also includes interpretation:\n\n```python\nimport requests\nimport tensorflow as tf\n\nimport gradio as gr\n\ninception_net = tf.keras.applications.MobileNetV2() # load the model\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\n\ndef classify_image(inp):\n inp = inp.reshape((-1, 224, 224, 3))\n inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)\n prediction = inception_net.predict(inp).flatten()\n return {labels[i]: float(prediction[i]) for i in range(1000)}\n\n\nimage = gr.Image(shape=(224, 224))\nlabel = gr.Label(num_top_classes=3)\n\ndemo = gr.Interface(\n fn=classify_image, inputs=image, outputs=label, interpretation=\"default\"\n)\n\ndemo.launch()\n\n```\n\n\nIn addition to `default`, Gradio also includes [Shapley-based interpretation](https://christophm.github.io/interpretable-ml-book/shap.html), which provides more accurate interpretations, albeit usually with a slower runtime. To use this, simply set the `interpretation` parameter to `\"shap\"` (note: also make sure the python package `shap` is installed). Optionally, you can modify the `num_shap` parameter, which controls the tradeoff between accuracy and runtime (increasing this value generally increases accuracy). Here is an example:\n\n```python\ngr.Interface(fn=classify_image,\n inputs=image, \n outputs=label, \n interpretation=\"shap\", \n num_shap=5).launch()\n```\n\nThis will work for any function, even if internally, the model is a complex neural network or some other black box. If you use Gradio's `default` or `shap` interpretation, the output component must be a `Label`. All common input components are supported. 
Here is an example with text input.\n\n```python\nimport gradio as gr\n\nmale_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n\n\ndef gender_of_sentence(sentence):\n male_count = len([word for word in sentence.split() if word.lower() in male_words])\n female_count = len(\n [word for word in sentence.split() if word.lower() in female_words]\n )\n total = max(male_count + female_count, 1)\n return {\"male\": male_count / total, \"female\": female_count / total}\n\n\ndemo = gr.Interface(\n fn=gender_of_sentence,\n inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n outputs=\"label\",\n interpretation=\"default\",\n)\n\ndemo.launch()\n\n```\n\nSo what is happening under the hood? With these interpretation methods, Gradio runs the prediction multiple times with modified versions of the input. Based on the results, you'll see that the interface automatically highlights the parts of the text (or image, etc.) that contributed increased the likelihood of the class as red. The intensity of color corresponds to the importance of that part of the input. The parts that decrease the class confidence are highlighted blue.\n\nYou can also write your own interpretation function. The demo below adds custom interpretation to the previous demo. This function will take the same inputs as the main wrapped function. The output of this interpretation function will be used to highlight the input of each input component - therefore the function must return a list where the number of elements corresponds to the number of input components. To see the format for interpretation for each input component, check the Docs.\n\n```python\nimport re\n\nimport gradio as gr\n\nmale_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n\n\ndef gender_of_sentence(sentence):\n male_count = len([word for word in sentence.split() if word.lower() in male_words])\n female_count = len(\n [word for word in sentence.split() if word.lower() in female_words]\n )\n total = max(male_count + female_count, 1)\n return {\"male\": male_count / total, \"female\": female_count / total}\n\n\n# Number of arguments to interpretation function must\n# match number of inputs to prediction function\ndef interpret_gender(sentence):\n result = gender_of_sentence(sentence)\n is_male = result[\"male\"] > result[\"female\"]\n interpretation = []\n for word in re.split(\"( )\", sentence):\n score = 0\n token = word.lower()\n if (is_male and token in male_words) or (not is_male and token in female_words):\n score = 1\n elif (is_male and token in female_words) or (\n not is_male and token in male_words\n ):\n score = -1\n interpretation.append((word, score))\n # Output must be a list of lists containing the same number of elements as inputs\n # Each element corresponds to the interpretation scores for the given input\n return [interpretation]\n\n\ndemo = gr.Interface(\n fn=gender_of_sentence,\n inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n outputs=\"label\",\n interpretation=interpret_gender,\n)\n\ndemo.launch()\n\n```\n\nLearn more about Interpretation in the [docs](https://gradio.app/docs#interpretation). 
\n\n## Custom Styling\n\nIf you'd like to have more fine-grained control over any aspect of your demo, you can also write your own css or pass in a filepath to a css file, with the `css` parameter of the `Interface` class.\n\n```python\ngr.Interface(..., css=\"body {background-color: red}\")\n```\n\nIf you'd like to reference external files in your css, preface the file path (which can be a relative or absolute path) with `\"file=\"`, for example:\n\n```python\ngr.Interface(..., css=\"body {background-image: url('file=clouds.jpg')}\")\n```\n\n**Warning**: Custom CSS is *not* guaranteed to work across Gradio versions as the Gradio HTML DOM may change. We recommend using custom CSS sparingly and instead using [Themes](/guides/theming-guide/) whenever possible. \n\n## Loading Hugging Face Models and Spaces\n\nGradio integrates nicely with the [Hugging Face Hub](https://hf.co), allowing you to load models and Spaces with just one line of code. To use this, simply use the `load()` method in the `Interface` class. So:\n\n- To load any model from the Hugging Face Hub and create an interface around it, you pass `\"model/\"` or `\"huggingface/\"` followed by the model name, like these examples:\n\n```python\ngr.Interface.load(\"huggingface/gpt2\").launch();\n```\n\n```python\ngr.Interface.load(\"huggingface/EleutherAI/gpt-j-6B\", \n inputs=gr.Textbox(lines=5, label=\"Input Text\") # customizes the input component\n).launch()\n```\n\n- To load any Space from the Hugging Face Hub and recreate it locally (so that you can customize the inputs and outputs for example), you pass `\"spaces/\"` followed by the model name:\n\n```python\ngr.Interface.load(\"spaces/eugenesiow/remove-bg\", \n inputs=\"webcam\", \n title=\"Remove your webcam background!\").launch()\n```\n\nOne of the great things about loading Hugging Face models or spaces using Gradio is that you can then immediately use the resulting `Interface` object just like function in your Python code (this works for every type of model/space: text, images, audio, video, and even multimodal models):\n\n```python\nio = gr.Interface.load(\"models/EleutherAI/gpt-neo-2.7B\")\nio(\"It was the best of times\") # outputs model completion\n```\n\n## Putting Interfaces in Parallel and Series\n\nGradio also lets you mix interfaces very easily using the `gradio.Parallel` and `gradio.Series` classes. `Parallel` lets you put two similar models (if they have the same input type) in parallel to compare model predictions:\n\n```python\ngenerator1 = gr.Interface.load(\"huggingface/gpt2\")\ngenerator2 = gr.Interface.load(\"huggingface/EleutherAI/gpt-neo-2.7B\")\ngenerator3 = gr.Interface.load(\"huggingface/EleutherAI/gpt-j-6B\")\n\ngr.Parallel(generator1, generator2, generator3).launch()\n```\n\n`Series` lets you put models and spaces in series, piping the output of one model into the input of the next model. \n\n```python\ngenerator = gr.Interface.load(\"huggingface/gpt2\")\ntranslator = gr.Interface.load(\"huggingface/t5-small\")\n\ngr.Series(generator, translator).launch() \n# this demo generates text, then translates it to German, and outputs the final result.\n```\n\nAnd of course, you can also mix `Parallel` and `Series` together whenever that makes sense!\n\nLearn more about Parallel and Series in the [docs](https://gradio.app/docs#parallel). ", "html": "

Advanced Interface Features

\n\n

There's more to cover on the Interface class. This guide covers all the advanced features: Using Interpretation, custom styling, loading from the Hugging Face Hub, and using Parallel and Series.

\n\n

Interpreting your Predictions

\n\n

Most models are black boxes such that the internal logic of the function is hidden from the end user. To encourage transparency, we've made it very easy to add interpretation to your model by simply setting the interpretation keyword in the Interface class to default. This allows your users to understand what parts of the input are responsible for the output. Take a look at the simple interface below which shows an image classifier that also includes interpretation:

\n\n
import requests\nimport tensorflow as tf\n\nimport gradio as gr\n\ninception_net = tf.keras.applications.MobileNetV2()  # load the model\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\n\ndef classify_image(inp):\n    inp = inp.reshape((-1, 224, 224, 3))\n    inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)\n    prediction = inception_net.predict(inp).flatten()\n    return {labels[i]: float(prediction[i]) for i in range(1000)}\n\n\nimage = gr.Image(shape=(224, 224))\nlabel = gr.Label(num_top_classes=3)\n\ndemo = gr.Interface(\n    fn=classify_image, inputs=image, outputs=label, interpretation=\"default\"\n)\n\ndemo.launch()\n\n
\n\n

In addition to default, Gradio also includes Shapley-based interpretation, which provides more accurate interpretations, albeit usually with a slower runtime. To use this, simply set the interpretation parameter to \"shap\" (note: also make sure the python package shap is installed). Optionally, you can modify the num_shap parameter, which controls the tradeoff between accuracy and runtime (increasing this value generally increases accuracy). Here is an example:

\n\n
gr.Interface(fn=classify_image,\n            inputs=image, \n            outputs=label, \n            interpretation=\"shap\", \n            num_shap=5).launch()\n
\n\n

This will work for any function, even if internally, the model is a complex neural network or some other black box. If you use Gradio's default or shap interpretation, the output component must be a Label. All common input components are supported. Here is an example with text input.

\n\n
import gradio as gr\n\nmale_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n\n\ndef gender_of_sentence(sentence):\n    male_count = len([word for word in sentence.split() if word.lower() in male_words])\n    female_count = len(\n        [word for word in sentence.split() if word.lower() in female_words]\n    )\n    total = max(male_count + female_count, 1)\n    return {\"male\": male_count / total, \"female\": female_count / total}\n\n\ndemo = gr.Interface(\n    fn=gender_of_sentence,\n    inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n    outputs=\"label\",\n    interpretation=\"default\",\n)\n\ndemo.launch()\n\n
\n\n

So what is happening under the hood? With these interpretation methods, Gradio runs the prediction multiple times with modified versions of the input. Based on the results, you'll see that the interface automatically highlights the parts of the text (or image, etc.) that contributed to increasing the likelihood of the class in red. The intensity of color corresponds to the importance of that part of the input. The parts that decrease the class confidence are highlighted in blue.

\n\n

You can also write your own interpretation function. The demo below adds custom interpretation to the previous demo. This function will take the same inputs as the main wrapped function. The output of this interpretation function will be used to highlight the input of each input component - therefore the function must return a list where the number of elements corresponds to the number of input components. To see the format for interpretation for each input component, check the Docs.

\n\n
import re\n\nimport gradio as gr\n\nmale_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n\n\ndef gender_of_sentence(sentence):\n    male_count = len([word for word in sentence.split() if word.lower() in male_words])\n    female_count = len(\n        [word for word in sentence.split() if word.lower() in female_words]\n    )\n    total = max(male_count + female_count, 1)\n    return {\"male\": male_count / total, \"female\": female_count / total}\n\n\n# Number of arguments to interpretation function must\n# match number of inputs to prediction function\ndef interpret_gender(sentence):\n    result = gender_of_sentence(sentence)\n    is_male = result[\"male\"] > result[\"female\"]\n    interpretation = []\n    for word in re.split(\"( )\", sentence):\n        score = 0\n        token = word.lower()\n        if (is_male and token in male_words) or (not is_male and token in female_words):\n            score = 1\n        elif (is_male and token in female_words) or (\n            not is_male and token in male_words\n        ):\n            score = -1\n        interpretation.append((word, score))\n    # Output must be a list of lists containing the same number of elements as inputs\n    # Each element corresponds to the interpretation scores for the given input\n    return [interpretation]\n\n\ndemo = gr.Interface(\n    fn=gender_of_sentence,\n    inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n    outputs=\"label\",\n    interpretation=interpret_gender,\n)\n\ndemo.launch()\n\n
\n\n

Learn more about Interpretation in the docs.

\n\n

Custom Styling

\n\n

If you'd like to have more fine-grained control over any aspect of your demo, you can also write your own css or pass in a filepath to a css file, with the css parameter of the Interface class.

\n\n
gr.Interface(..., css=\"body {background-color: red}\")\n
\n\n

If you'd like to reference external files in your css, preface the file path (which can be a relative or absolute path) with \"file=\", for example:

\n\n
gr.Interface(..., css=\"body {background-image: url('file=clouds.jpg')}\")\n
\n\n

Warning: Custom CSS is not guaranteed to work across Gradio versions as the Gradio HTML DOM may change. We recommend using custom CSS sparingly and instead using Themes whenever possible.

\n\n
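
For instance, a theme can be passed to the Interface class via its theme parameter (a minimal sketch, not part of the original guide; gr.themes.Soft() is one of Gradio's built-in themes and is used here purely as an illustration):

\n\n
gr.Interface(..., theme=gr.themes.Soft())\n
\n\n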

Loading Hugging Face Models and Spaces

\n\n

Gradio integrates nicely with the Hugging Face Hub, allowing you to load models and Spaces with just one line of code. To use this, simply use the load() method in the Interface class. So:

\n\n
    \n
  • To load any model from the Hugging Face Hub and create an interface around it, you pass \"models/\" or \"huggingface/\" followed by the model name, like these examples:
  • \n
\n\n
gr.Interface.load(\"huggingface/gpt2\").launch();\n
\n\n
gr.Interface.load(\"huggingface/EleutherAI/gpt-j-6B\", \n    inputs=gr.Textbox(lines=5, label=\"Input Text\")  # customizes the input component\n).launch()\n
\n\n
    \n
  • To load any Space from the Hugging Face Hub and recreate it locally (so that you can customize the inputs and outputs, for example), you pass \"spaces/\" followed by the name of the Space:
  • \n
\n\n
gr.Interface.load(\"spaces/eugenesiow/remove-bg\", \n                  inputs=\"webcam\", \n                  title=\"Remove your webcam background!\").launch()\n
\n\n

One of the great things about loading Hugging Face models or spaces using Gradio is that you can then immediately use the resulting Interface object just like a function in your Python code (this works for every type of model/space: text, images, audio, video, and even multimodal models):

\n\n
io = gr.Interface.load(\"models/EleutherAI/gpt-neo-2.7B\")\nio(\"It was the best of times\")  # outputs model completion\n
\n\n

Putting Interfaces in Parallel and Series

\n\n

Gradio also lets you mix interfaces very easily using the gradio.Parallel and gradio.Series classes. Parallel lets you put two or more similar models (as long as they have the same input type) in parallel to compare their predictions:

\n\n
generator1 = gr.Interface.load(\"huggingface/gpt2\")\ngenerator2 = gr.Interface.load(\"huggingface/EleutherAI/gpt-neo-2.7B\")\ngenerator3 = gr.Interface.load(\"huggingface/EleutherAI/gpt-j-6B\")\n\ngr.Parallel(generator1, generator2, generator3).launch()\n
\n\n

Series lets you put models and spaces in series, piping the output of one model into the input of the next model.

\n\n
generator = gr.Interface.load(\"huggingface/gpt2\")\ntranslator = gr.Interface.load(\"huggingface/t5-small\")\n\ngr.Series(generator, translator).launch()  \n# this demo generates text, then translates it to German, and outputs the final result.\n
\n\n

And of course, you can also mix Parallel and Series together whenever that makes sense!

\n\n
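
As a minimal sketch (not part of the original guide, and assuming a Series object can be passed to Parallel just like any other Interface), you could compare a model's raw output against the same model piped through a translator:

\n\n
generator = gr.Interface.load(\"huggingface/gpt2\")\ntranslator = gr.Interface.load(\"huggingface/t5-small\")\n\n# Compare raw generation with generation followed by translation, side by side\ngr.Parallel(generator, gr.Series(generator, translator)).launch()\n
\n\n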

Learn more about Parallel and Series in the docs.

\n", "tags": [], "spaces": [], "url": "/guides/advanced-interface-features/", "contributor": null}], "parent": "gradio"}, "tabbedinterface": {"class": null, "name": "TabbedInterface", "description": "A TabbedInterface is created by providing a list of Interfaces, each of which gets rendered in a separate tab.", "tags": {"demos": "stt_or_tts"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "interface_list", "annotation": "list[Interface]", "doc": "a list of interfaces to be rendered in tabs."}, {"name": "tab_names", "annotation": "list[str] | None", "doc": "a list of tab names. If None, the tab names will be \"Tab 1\", \"Tab 2\", etc.", "default": "None"}, {"name": "title", "annotation": "str | None", "doc": "a title for the interface; if provided, appears above the input and output components in large font. Also used as the tab title when opened in a browser window.", "default": "None"}, {"name": "theme", "annotation": "Theme | None", "doc": null, "default": "None"}, {"name": "analytics_enabled", "annotation": "bool | None", "doc": "whether to allow basic telemetry. If None, will use GRADIO_ANALYTICS_ENABLED environment variable or default to True.", "default": "None"}, {"name": "css", "annotation": "str | None", "doc": "custom css or path to custom css file to apply to entire Blocks", "default": "None"}], "returns": {"annotation": null, "doc": "a Gradio Tabbed Interface for the given interfaces"}, "example": null, "fns": [], "demos": [["stt_or_tts", "import gradio as gr\n\ntts_examples = [\n \"I love learning machine learning\",\n \"How do you do?\",\n]\n\ntts_demo = gr.load(\n \"huggingface/facebook/fastspeech2-en-ljspeech\",\n title=None,\n examples=tts_examples,\n description=\"Give me something to say!\",\n)\n\nstt_demo = gr.load(\n \"huggingface/facebook/wav2vec2-base-960h\",\n title=None,\n inputs=\"mic\",\n description=\"Let me try to guess what you're saying!\",\n)\n\ndemo = gr.TabbedInterface([tts_demo, stt_demo], [\"Text-to-speech\", \"Speech-to-text\"])\n\nif __name__ == \"__main__\":\n demo.launch()\n"]], "parent": "gradio"}, "parallel": {"class": null, "name": "Parallel", "description": "Creates a new Interface consisting of multiple Interfaces in parallel (comparing their outputs). The Interfaces to put in Parallel must share the same input components (but can have different output components).
", "tags": {"demos": "interface_parallel, interface_parallel_load", "guides": "advanced-interface-features"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "interfaces", "annotation": "", "doc": "any number of Interface objects that are to be compared in parallel"}, {"name": "options", "annotation": "", "doc": "additional kwargs that are passed into the new Interface object to customize it", "kwargs": true, "args": true}], "returns": {"annotation": null, "doc": "an Interface object comparing the given models"}, "example": null, "fns": [], "demos": [["interface_parallel", "import gradio as gr\n\ngreeter_1 = gr.Interface(lambda name: f\"Hello {name}!\", inputs=\"textbox\", outputs=gr.Textbox(label=\"Greeter 1\"))\ngreeter_2 = gr.Interface(lambda name: f\"Greetings {name}!\", inputs=\"textbox\", outputs=gr.Textbox(label=\"Greeter 2\"))\ndemo = gr.Parallel(greeter_1, greeter_2)\n\nif __name__ == \"__main__\":\n demo.launch()"], ["interface_parallel_load", "import gradio as gr\n\ngenerator1 = gr.load(\"huggingface/gpt2\")\ngenerator2 = gr.load(\"huggingface/gpt2-xl\")\n\ndemo = gr.Parallel(generator1, generator2)\n\nif __name__ == \"__main__\":\n demo.launch()"]], "guides": [{"name": "advanced-interface-features", "category": "building-interfaces", "pretty_category": "Building Interfaces", "guide_index": 4, "absolute_index": 6, "pretty_name": "Advanced Interface Features", "content": "# Advanced Interface Features\n\nThere's more to cover on the [Interface](https://gradio.app/docs#interface) class. This guide covers all the advanced features: Using [Interpretation](https://gradio.app/docs#interpretation), custom styling, loading from the [Hugging Face Hub](https://hf.co), and using [Parallel](https://gradio.app/docs#parallel) and [Series](https://gradio.app/docs#series). \n\n## Interpreting your Predictions\n\nMost models are black boxes such that the internal logic of the function is hidden from the end user. To encourage transparency, we've made it very easy to add interpretation to your model by simply setting the `interpretation` keyword in the `Interface` class to `default`. This allows your users to understand what parts of the input are responsible for the output. Take a look at the simple interface below which shows an image classifier that also includes interpretation:\n\n```python\nimport requests\nimport tensorflow as tf\n\nimport gradio as gr\n\ninception_net = tf.keras.applications.MobileNetV2() # load the model\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\n\ndef classify_image(inp):\n inp = inp.reshape((-1, 224, 224, 3))\n inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)\n prediction = inception_net.predict(inp).flatten()\n return {labels[i]: float(prediction[i]) for i in range(1000)}\n\n\nimage = gr.Image(shape=(224, 224))\nlabel = gr.Label(num_top_classes=3)\n\ndemo = gr.Interface(\n fn=classify_image, inputs=image, outputs=label, interpretation=\"default\"\n)\n\ndemo.launch()\n\n```\n\n\nIn addition to `default`, Gradio also includes [Shapley-based interpretation](https://christophm.github.io/interpretable-ml-book/shap.html), which provides more accurate interpretations, albeit usually with a slower runtime. To use this, simply set the `interpretation` parameter to `\"shap\"` (note: also make sure the python package `shap` is installed). 
Optionally, you can modify the `num_shap` parameter, which controls the tradeoff between accuracy and runtime (increasing this value generally increases accuracy). Here is an example:\n\n```python\ngr.Interface(fn=classify_image,\n inputs=image, \n outputs=label, \n interpretation=\"shap\", \n num_shap=5).launch()\n```\n\nThis will work for any function, even if internally, the model is a complex neural network or some other black box. If you use Gradio's `default` or `shap` interpretation, the output component must be a `Label`. All common input components are supported. Here is an example with text input.\n\n```python\nimport gradio as gr\n\nmale_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n\n\ndef gender_of_sentence(sentence):\n male_count = len([word for word in sentence.split() if word.lower() in male_words])\n female_count = len(\n [word for word in sentence.split() if word.lower() in female_words]\n )\n total = max(male_count + female_count, 1)\n return {\"male\": male_count / total, \"female\": female_count / total}\n\n\ndemo = gr.Interface(\n fn=gender_of_sentence,\n inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n outputs=\"label\",\n interpretation=\"default\",\n)\n\ndemo.launch()\n\n```\n\nSo what is happening under the hood? With these interpretation methods, Gradio runs the prediction multiple times with modified versions of the input. Based on the results, you'll see that the interface automatically highlights the parts of the text (or image, etc.) that contributed increased the likelihood of the class as red. The intensity of color corresponds to the importance of that part of the input. The parts that decrease the class confidence are highlighted blue.\n\nYou can also write your own interpretation function. The demo below adds custom interpretation to the previous demo. This function will take the same inputs as the main wrapped function. The output of this interpretation function will be used to highlight the input of each input component - therefore the function must return a list where the number of elements corresponds to the number of input components. 
To see the format for interpretation for each input component, check the Docs.\n\n```python\nimport re\n\nimport gradio as gr\n\nmale_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n\n\ndef gender_of_sentence(sentence):\n male_count = len([word for word in sentence.split() if word.lower() in male_words])\n female_count = len(\n [word for word in sentence.split() if word.lower() in female_words]\n )\n total = max(male_count + female_count, 1)\n return {\"male\": male_count / total, \"female\": female_count / total}\n\n\n# Number of arguments to interpretation function must\n# match number of inputs to prediction function\ndef interpret_gender(sentence):\n result = gender_of_sentence(sentence)\n is_male = result[\"male\"] > result[\"female\"]\n interpretation = []\n for word in re.split(\"( )\", sentence):\n score = 0\n token = word.lower()\n if (is_male and token in male_words) or (not is_male and token in female_words):\n score = 1\n elif (is_male and token in female_words) or (\n not is_male and token in male_words\n ):\n score = -1\n interpretation.append((word, score))\n # Output must be a list of lists containing the same number of elements as inputs\n # Each element corresponds to the interpretation scores for the given input\n return [interpretation]\n\n\ndemo = gr.Interface(\n fn=gender_of_sentence,\n inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n outputs=\"label\",\n interpretation=interpret_gender,\n)\n\ndemo.launch()\n\n```\n\nLearn more about Interpretation in the [docs](https://gradio.app/docs#interpretation). \n\n## Custom Styling\n\nIf you'd like to have more fine-grained control over any aspect of your demo, you can also write your own css or pass in a filepath to a css file, with the `css` parameter of the `Interface` class.\n\n```python\ngr.Interface(..., css=\"body {background-color: red}\")\n```\n\nIf you'd like to reference external files in your css, preface the file path (which can be a relative or absolute path) with `\"file=\"`, for example:\n\n```python\ngr.Interface(..., css=\"body {background-image: url('file=clouds.jpg')}\")\n```\n\n**Warning**: Custom CSS is *not* guaranteed to work across Gradio versions as the Gradio HTML DOM may change. We recommend using custom CSS sparingly and instead using [Themes](/guides/theming-guide/) whenever possible. \n\n## Loading Hugging Face Models and Spaces\n\nGradio integrates nicely with the [Hugging Face Hub](https://hf.co), allowing you to load models and Spaces with just one line of code. To use this, simply use the `load()` method in the `Interface` class. 
So:\n\n- To load any model from the Hugging Face Hub and create an interface around it, you pass `\"model/\"` or `\"huggingface/\"` followed by the model name, like these examples:\n\n```python\ngr.Interface.load(\"huggingface/gpt2\").launch();\n```\n\n```python\ngr.Interface.load(\"huggingface/EleutherAI/gpt-j-6B\", \n inputs=gr.Textbox(lines=5, label=\"Input Text\") # customizes the input component\n).launch()\n```\n\n- To load any Space from the Hugging Face Hub and recreate it locally (so that you can customize the inputs and outputs for example), you pass `\"spaces/\"` followed by the model name:\n\n```python\ngr.Interface.load(\"spaces/eugenesiow/remove-bg\", \n inputs=\"webcam\", \n title=\"Remove your webcam background!\").launch()\n```\n\nOne of the great things about loading Hugging Face models or spaces using Gradio is that you can then immediately use the resulting `Interface` object just like function in your Python code (this works for every type of model/space: text, images, audio, video, and even multimodal models):\n\n```python\nio = gr.Interface.load(\"models/EleutherAI/gpt-neo-2.7B\")\nio(\"It was the best of times\") # outputs model completion\n```\n\n## Putting Interfaces in Parallel and Series\n\nGradio also lets you mix interfaces very easily using the `gradio.Parallel` and `gradio.Series` classes. `Parallel` lets you put two similar models (if they have the same input type) in parallel to compare model predictions:\n\n```python\ngenerator1 = gr.Interface.load(\"huggingface/gpt2\")\ngenerator2 = gr.Interface.load(\"huggingface/EleutherAI/gpt-neo-2.7B\")\ngenerator3 = gr.Interface.load(\"huggingface/EleutherAI/gpt-j-6B\")\n\ngr.Parallel(generator1, generator2, generator3).launch()\n```\n\n`Series` lets you put models and spaces in series, piping the output of one model into the input of the next model. \n\n```python\ngenerator = gr.Interface.load(\"huggingface/gpt2\")\ntranslator = gr.Interface.load(\"huggingface/t5-small\")\n\ngr.Series(generator, translator).launch() \n# this demo generates text, then translates it to German, and outputs the final result.\n```\n\nAnd of course, you can also mix `Parallel` and `Series` together whenever that makes sense!\n\nLearn more about Parallel and Series in the [docs](https://gradio.app/docs#parallel). ", "html": "

Advanced Interface Features

\n\n

There's more to cover on the Interface class. This guide covers all the advanced features: Using Interpretation, custom styling, loading from the Hugging Face Hub, and using Parallel and Series.

\n\n

Interpreting your Predictions

\n\n

Most models are black boxes such that the internal logic of the function is hidden from the end user. To encourage transparency, we've made it very easy to add interpretation to your model by simply setting the interpretation keyword in the Interface class to default. This allows your users to understand what parts of the input are responsible for the output. Take a look at the simple interface below which shows an image classifier that also includes interpretation:

\n\n
import requests\nimport tensorflow as tf\n\nimport gradio as gr\n\ninception_net = tf.keras.applications.MobileNetV2()  # load the model\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\n\ndef classify_image(inp):\n    inp = inp.reshape((-1, 224, 224, 3))\n    inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)\n    prediction = inception_net.predict(inp).flatten()\n    return {labels[i]: float(prediction[i]) for i in range(1000)}\n\n\nimage = gr.Image(shape=(224, 224))\nlabel = gr.Label(num_top_classes=3)\n\ndemo = gr.Interface(\n    fn=classify_image, inputs=image, outputs=label, interpretation=\"default\"\n)\n\ndemo.launch()\n\n
\n\n

In addition to default, Gradio also includes Shapley-based interpretation, which provides more accurate interpretations, albeit usually with a slower runtime. To use this, simply set the interpretation parameter to \"shap\" (note: also make sure the python package shap is installed). Optionally, you can modify the num_shap parameter, which controls the tradeoff between accuracy and runtime (increasing this value generally increases accuracy). Here is an example:

\n\n
gr.Interface(fn=classify_image,\n            inputs=image, \n            outputs=label, \n            interpretation=\"shap\", \n            num_shap=5).launch()\n
\n\n

This will work for any function, even if internally, the model is a complex neural network or some other black box. If you use Gradio's default or shap interpretation, the output component must be a Label. All common input components are supported. Here is an example with text input.

\n\n
import gradio as gr\n\nmale_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n\n\ndef gender_of_sentence(sentence):\n    male_count = len([word for word in sentence.split() if word.lower() in male_words])\n    female_count = len(\n        [word for word in sentence.split() if word.lower() in female_words]\n    )\n    total = max(male_count + female_count, 1)\n    return {\"male\": male_count / total, \"female\": female_count / total}\n\n\ndemo = gr.Interface(\n    fn=gender_of_sentence,\n    inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n    outputs=\"label\",\n    interpretation=\"default\",\n)\n\ndemo.launch()\n\n
\n\n

So what is happening under the hood? With these interpretation methods, Gradio runs the prediction multiple times with modified versions of the input. Based on the results, you'll see that the interface automatically highlights, in red, the parts of the text (or image, etc.) that contributed to increasing the likelihood of the class. The intensity of the color corresponds to the importance of that part of the input. The parts that decrease the class confidence are highlighted in blue.

\n\n

You can also write your own interpretation function. The demo below adds custom interpretation to the previous demo. This function will take the same inputs as the main wrapped function. The output of this interpretation function will be used to highlight the input of each input component - therefore the function must return a list where the number of elements corresponds to the number of input components. To see the format for interpretation for each input component, check the Docs.

\n\n
import re\n\nimport gradio as gr\n\nmale_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n\n\ndef gender_of_sentence(sentence):\n    male_count = len([word for word in sentence.split() if word.lower() in male_words])\n    female_count = len(\n        [word for word in sentence.split() if word.lower() in female_words]\n    )\n    total = max(male_count + female_count, 1)\n    return {\"male\": male_count / total, \"female\": female_count / total}\n\n\n# Number of arguments to interpretation function must\n# match number of inputs to prediction function\ndef interpret_gender(sentence):\n    result = gender_of_sentence(sentence)\n    is_male = result[\"male\"] > result[\"female\"]\n    interpretation = []\n    for word in re.split(\"( )\", sentence):\n        score = 0\n        token = word.lower()\n        if (is_male and token in male_words) or (not is_male and token in female_words):\n            score = 1\n        elif (is_male and token in female_words) or (\n            not is_male and token in male_words\n        ):\n            score = -1\n        interpretation.append((word, score))\n    # Output must be a list of lists containing the same number of elements as inputs\n    # Each element corresponds to the interpretation scores for the given input\n    return [interpretation]\n\n\ndemo = gr.Interface(\n    fn=gender_of_sentence,\n    inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n    outputs=\"label\",\n    interpretation=interpret_gender,\n)\n\ndemo.launch()\n\n
\n\n

Learn more about Interpretation in the docs.

\n\n

Custom Styling

\n\n

If you'd like to have more fine-grained control over any aspect of your demo, you can also write your own css or pass in a filepath to a css file, with the css parameter of the Interface class.

\n\n
gr.Interface(..., css=\"body {background-color: red}\")\n
\n\n

If you'd like to reference external files in your css, preface the file path (which can be a relative or absolute path) with \"file=\", for example:

\n\n
gr.Interface(..., css=\"body {background-image: url('file=clouds.jpg')}\")\n
\n\n

Warning: Custom CSS is not guaranteed to work across Gradio versions as the Gradio HTML DOM may change. We recommend using custom CSS sparingly and instead using Themes whenever possible.

\n\n

Loading Hugging Face Models and Spaces

\n\n

Gradio integrates nicely with the Hugging Face Hub, allowing you to load models and Spaces with just one line of code. To use this, simply use the load() method in the Interface class. So:

\n\n
    \n
  • To load any model from the Hugging Face Hub and create an interface around it, you pass \"models/\" or \"huggingface/\" followed by the model name, like these examples:
  • \n
\n\n
gr.Interface.load(\"huggingface/gpt2\").launch();\n
\n\n
gr.Interface.load(\"huggingface/EleutherAI/gpt-j-6B\", \n    inputs=gr.Textbox(lines=5, label=\"Input Text\")  # customizes the input component\n).launch()\n
\n\n
    \n
  • To load any Space from the Hugging Face Hub and recreate it locally (so that you can customize the inputs and outputs, for example), you pass \"spaces/\" followed by the name of the Space:
  • \n
\n\n
gr.Interface.load(\"spaces/eugenesiow/remove-bg\", \n                  inputs=\"webcam\", \n                  title=\"Remove your webcam background!\").launch()\n
\n\n

One of the great things about loading Hugging Face models or spaces using Gradio is that you can then immediately use the resulting Interface object just like a function in your Python code (this works for every type of model/space: text, images, audio, video, and even multimodal models):

\n\n
io = gr.Interface.load(\"models/EleutherAI/gpt-neo-2.7B\")\nio(\"It was the best of times\")  # outputs model completion\n
\n\n

Putting Interfaces in Parallel and Series

\n\n

Gradio also lets you mix interfaces very easily using the gradio.Parallel and gradio.Series classes. Parallel lets you put two or more similar models (as long as they have the same input type) in parallel to compare their predictions:

\n\n
generator1 = gr.Interface.load(\"huggingface/gpt2\")\ngenerator2 = gr.Interface.load(\"huggingface/EleutherAI/gpt-neo-2.7B\")\ngenerator3 = gr.Interface.load(\"huggingface/EleutherAI/gpt-j-6B\")\n\ngr.Parallel(generator1, generator2, generator3).launch()\n
\n\n

Series lets you put models and spaces in series, piping the output of one model into the input of the next model.

\n\n
generator = gr.Interface.load(\"huggingface/gpt2\")\ntranslator = gr.Interface.load(\"huggingface/t5-small\")\n\ngr.Series(generator, translator).launch()  \n# this demo generates text, then translates it to German, and outputs the final result.\n
\n\n

And of course, you can also mix Parallel and Series together whenever that makes sense!

\n\n

Learn more about Parallel and Series in the docs.

\n", "tags": [], "spaces": [], "url": "/guides/advanced-interface-features/", "contributor": null}], "parent": "gradio"}, "series": {"class": null, "name": "Series", "description": "Creates a new Interface from multiple Interfaces in series (the output of one is fed as the input to the next, and so the input and output components must agree between the interfaces).
", "tags": {"demos": "interface_series, interface_series_load", "guides": "advanced-interface-features"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "interfaces", "annotation": "", "doc": "any number of Interface objects that are to be connected in series"}, {"name": "options", "annotation": "", "doc": "additional kwargs that are passed into the new Interface object to customize it", "kwargs": true, "args": true}], "returns": {"annotation": null, "doc": "an Interface object connecting the given models"}, "example": null, "fns": [], "demos": [["interface_series", "import gradio as gr\n\nget_name = gr.Interface(lambda name: name, inputs=\"textbox\", outputs=\"textbox\")\nprepend_hello = gr.Interface(lambda name: f\"Hello {name}!\", inputs=\"textbox\", outputs=\"textbox\")\nappend_nice = gr.Interface(lambda greeting: f\"{greeting} Nice to meet you!\",\n inputs=\"textbox\", outputs=gr.Textbox(label=\"Greeting\"))\ndemo = gr.Series(get_name, prepend_hello, append_nice)\n\nif __name__ == \"__main__\":\n demo.launch()"], ["interface_series_load", "import gradio as gr\n\ngenerator = gr.load(\"huggingface/gpt2\")\ntranslator = gr.load(\"huggingface/t5-small\")\n\ndemo = gr.Series(generator, translator, description=\"This demo combines two Spaces: a text generator (`huggingface/gpt2`) and a text translator (`huggingface/t5-small`). The first Space takes a prompt as input and generates a text. The second Space takes the generated text as input and translates it into another language.\")\n\nif __name__ == \"__main__\":\n demo.launch()"]], "guides": [{"name": "advanced-interface-features", "category": "building-interfaces", "pretty_category": "Building Interfaces", "guide_index": 4, "absolute_index": 6, "pretty_name": "Advanced Interface Features", "content": "# Advanced Interface Features\n\nThere's more to cover on the [Interface](https://gradio.app/docs#interface) class. This guide covers all the advanced features: Using [Interpretation](https://gradio.app/docs#interpretation), custom styling, loading from the [Hugging Face Hub](https://hf.co), and using [Parallel](https://gradio.app/docs#parallel) and [Series](https://gradio.app/docs#series). \n\n## Interpreting your Predictions\n\nMost models are black boxes such that the internal logic of the function is hidden from the end user. To encourage transparency, we've made it very easy to add interpretation to your model by simply setting the `interpretation` keyword in the `Interface` class to `default`. This allows your users to understand what parts of the input are responsible for the output. 
Take a look at the simple interface below which shows an image classifier that also includes interpretation:\n\n```python\nimport requests\nimport tensorflow as tf\n\nimport gradio as gr\n\ninception_net = tf.keras.applications.MobileNetV2() # load the model\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\n\ndef classify_image(inp):\n inp = inp.reshape((-1, 224, 224, 3))\n inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)\n prediction = inception_net.predict(inp).flatten()\n return {labels[i]: float(prediction[i]) for i in range(1000)}\n\n\nimage = gr.Image(shape=(224, 224))\nlabel = gr.Label(num_top_classes=3)\n\ndemo = gr.Interface(\n fn=classify_image, inputs=image, outputs=label, interpretation=\"default\"\n)\n\ndemo.launch()\n\n```\n\n\nIn addition to `default`, Gradio also includes [Shapley-based interpretation](https://christophm.github.io/interpretable-ml-book/shap.html), which provides more accurate interpretations, albeit usually with a slower runtime. To use this, simply set the `interpretation` parameter to `\"shap\"` (note: also make sure the python package `shap` is installed). Optionally, you can modify the `num_shap` parameter, which controls the tradeoff between accuracy and runtime (increasing this value generally increases accuracy). Here is an example:\n\n```python\ngr.Interface(fn=classify_image,\n inputs=image, \n outputs=label, \n interpretation=\"shap\", \n num_shap=5).launch()\n```\n\nThis will work for any function, even if internally, the model is a complex neural network or some other black box. If you use Gradio's `default` or `shap` interpretation, the output component must be a `Label`. All common input components are supported. Here is an example with text input.\n\n```python\nimport gradio as gr\n\nmale_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n\n\ndef gender_of_sentence(sentence):\n male_count = len([word for word in sentence.split() if word.lower() in male_words])\n female_count = len(\n [word for word in sentence.split() if word.lower() in female_words]\n )\n total = max(male_count + female_count, 1)\n return {\"male\": male_count / total, \"female\": female_count / total}\n\n\ndemo = gr.Interface(\n fn=gender_of_sentence,\n inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n outputs=\"label\",\n interpretation=\"default\",\n)\n\ndemo.launch()\n\n```\n\nSo what is happening under the hood? With these interpretation methods, Gradio runs the prediction multiple times with modified versions of the input. Based on the results, you'll see that the interface automatically highlights the parts of the text (or image, etc.) that contributed increased the likelihood of the class as red. The intensity of color corresponds to the importance of that part of the input. The parts that decrease the class confidence are highlighted blue.\n\nYou can also write your own interpretation function. The demo below adds custom interpretation to the previous demo. This function will take the same inputs as the main wrapped function. The output of this interpretation function will be used to highlight the input of each input component - therefore the function must return a list where the number of elements corresponds to the number of input components. 
To see the format for interpretation for each input component, check the Docs.\n\n```python\nimport re\n\nimport gradio as gr\n\nmale_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n\n\ndef gender_of_sentence(sentence):\n male_count = len([word for word in sentence.split() if word.lower() in male_words])\n female_count = len(\n [word for word in sentence.split() if word.lower() in female_words]\n )\n total = max(male_count + female_count, 1)\n return {\"male\": male_count / total, \"female\": female_count / total}\n\n\n# Number of arguments to interpretation function must\n# match number of inputs to prediction function\ndef interpret_gender(sentence):\n result = gender_of_sentence(sentence)\n is_male = result[\"male\"] > result[\"female\"]\n interpretation = []\n for word in re.split(\"( )\", sentence):\n score = 0\n token = word.lower()\n if (is_male and token in male_words) or (not is_male and token in female_words):\n score = 1\n elif (is_male and token in female_words) or (\n not is_male and token in male_words\n ):\n score = -1\n interpretation.append((word, score))\n # Output must be a list of lists containing the same number of elements as inputs\n # Each element corresponds to the interpretation scores for the given input\n return [interpretation]\n\n\ndemo = gr.Interface(\n fn=gender_of_sentence,\n inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n outputs=\"label\",\n interpretation=interpret_gender,\n)\n\ndemo.launch()\n\n```\n\nLearn more about Interpretation in the [docs](https://gradio.app/docs#interpretation). \n\n## Custom Styling\n\nIf you'd like to have more fine-grained control over any aspect of your demo, you can also write your own css or pass in a filepath to a css file, with the `css` parameter of the `Interface` class.\n\n```python\ngr.Interface(..., css=\"body {background-color: red}\")\n```\n\nIf you'd like to reference external files in your css, preface the file path (which can be a relative or absolute path) with `\"file=\"`, for example:\n\n```python\ngr.Interface(..., css=\"body {background-image: url('file=clouds.jpg')}\")\n```\n\n**Warning**: Custom CSS is *not* guaranteed to work across Gradio versions as the Gradio HTML DOM may change. We recommend using custom CSS sparingly and instead using [Themes](/guides/theming-guide/) whenever possible. \n\n## Loading Hugging Face Models and Spaces\n\nGradio integrates nicely with the [Hugging Face Hub](https://hf.co), allowing you to load models and Spaces with just one line of code. To use this, simply use the `load()` method in the `Interface` class. 
So:\n\n- To load any model from the Hugging Face Hub and create an interface around it, you pass `\"model/\"` or `\"huggingface/\"` followed by the model name, like these examples:\n\n```python\ngr.Interface.load(\"huggingface/gpt2\").launch();\n```\n\n```python\ngr.Interface.load(\"huggingface/EleutherAI/gpt-j-6B\", \n inputs=gr.Textbox(lines=5, label=\"Input Text\") # customizes the input component\n).launch()\n```\n\n- To load any Space from the Hugging Face Hub and recreate it locally (so that you can customize the inputs and outputs for example), you pass `\"spaces/\"` followed by the model name:\n\n```python\ngr.Interface.load(\"spaces/eugenesiow/remove-bg\", \n inputs=\"webcam\", \n title=\"Remove your webcam background!\").launch()\n```\n\nOne of the great things about loading Hugging Face models or spaces using Gradio is that you can then immediately use the resulting `Interface` object just like function in your Python code (this works for every type of model/space: text, images, audio, video, and even multimodal models):\n\n```python\nio = gr.Interface.load(\"models/EleutherAI/gpt-neo-2.7B\")\nio(\"It was the best of times\") # outputs model completion\n```\n\n## Putting Interfaces in Parallel and Series\n\nGradio also lets you mix interfaces very easily using the `gradio.Parallel` and `gradio.Series` classes. `Parallel` lets you put two similar models (if they have the same input type) in parallel to compare model predictions:\n\n```python\ngenerator1 = gr.Interface.load(\"huggingface/gpt2\")\ngenerator2 = gr.Interface.load(\"huggingface/EleutherAI/gpt-neo-2.7B\")\ngenerator3 = gr.Interface.load(\"huggingface/EleutherAI/gpt-j-6B\")\n\ngr.Parallel(generator1, generator2, generator3).launch()\n```\n\n`Series` lets you put models and spaces in series, piping the output of one model into the input of the next model. \n\n```python\ngenerator = gr.Interface.load(\"huggingface/gpt2\")\ntranslator = gr.Interface.load(\"huggingface/t5-small\")\n\ngr.Series(generator, translator).launch() \n# this demo generates text, then translates it to German, and outputs the final result.\n```\n\nAnd of course, you can also mix `Parallel` and `Series` together whenever that makes sense!\n\nLearn more about Parallel and Series in the [docs](https://gradio.app/docs#parallel). ", "html": "

Advanced Interface Features

\n\n

There's more to cover on the Interface class. This guide covers all the advanced features: Using Interpretation, custom styling, loading from the Hugging Face Hub, and using Parallel and Series.

\n\n

Interpreting your Predictions

\n\n

Most models are black boxes such that the internal logic of the function is hidden from the end user. To encourage transparency, we've made it very easy to add interpretation to your model by simply setting the interpretation keyword in the Interface class to default. This allows your users to understand what parts of the input are responsible for the output. Take a look at the simple interface below which shows an image classifier that also includes interpretation:

\n\n
import requests\nimport tensorflow as tf\n\nimport gradio as gr\n\ninception_net = tf.keras.applications.MobileNetV2()  # load the model\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\n\ndef classify_image(inp):\n    inp = inp.reshape((-1, 224, 224, 3))\n    inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)\n    prediction = inception_net.predict(inp).flatten()\n    return {labels[i]: float(prediction[i]) for i in range(1000)}\n\n\nimage = gr.Image(shape=(224, 224))\nlabel = gr.Label(num_top_classes=3)\n\ndemo = gr.Interface(\n    fn=classify_image, inputs=image, outputs=label, interpretation=\"default\"\n)\n\ndemo.launch()\n\n
\n\n

In addition to default, Gradio also includes Shapley-based interpretation, which provides more accurate interpretations, albeit usually with a slower runtime. To use this, simply set the interpretation parameter to \"shap\" (note: also make sure the python package shap is installed). Optionally, you can modify the num_shap parameter, which controls the tradeoff between accuracy and runtime (increasing this value generally increases accuracy). Here is an example:

\n\n
gr.Interface(fn=classify_image,\n            inputs=image, \n            outputs=label, \n            interpretation=\"shap\", \n            num_shap=5).launch()\n
\n\n

This will work for any function, even if internally, the model is a complex neural network or some other black box. If you use Gradio's default or shap interpretation, the output component must be a Label. All common input components are supported. Here is an example with text input.

\n\n
import gradio as gr\n\nmale_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n\n\ndef gender_of_sentence(sentence):\n    male_count = len([word for word in sentence.split() if word.lower() in male_words])\n    female_count = len(\n        [word for word in sentence.split() if word.lower() in female_words]\n    )\n    total = max(male_count + female_count, 1)\n    return {\"male\": male_count / total, \"female\": female_count / total}\n\n\ndemo = gr.Interface(\n    fn=gender_of_sentence,\n    inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n    outputs=\"label\",\n    interpretation=\"default\",\n)\n\ndemo.launch()\n\n
\n\n

So what is happening under the hood? With these interpretation methods, Gradio runs the prediction multiple times with modified versions of the input. Based on the results, you'll see that the interface automatically highlights, in red, the parts of the text (or image, etc.) that contributed to increasing the likelihood of the class. The intensity of the color corresponds to the importance of that part of the input. The parts that decrease the class confidence are highlighted in blue.

\n\n

You can also write your own interpretation function. The demo below adds custom interpretation to the previous demo. This function will take the same inputs as the main wrapped function. The output of this interpretation function will be used to highlight the input of each input component - therefore the function must return a list where the number of elements corresponds to the number of input components. To see the format for interpretation for each input component, check the Docs.

\n\n
import re\n\nimport gradio as gr\n\nmale_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n\n\ndef gender_of_sentence(sentence):\n    male_count = len([word for word in sentence.split() if word.lower() in male_words])\n    female_count = len(\n        [word for word in sentence.split() if word.lower() in female_words]\n    )\n    total = max(male_count + female_count, 1)\n    return {\"male\": male_count / total, \"female\": female_count / total}\n\n\n# Number of arguments to interpretation function must\n# match number of inputs to prediction function\ndef interpret_gender(sentence):\n    result = gender_of_sentence(sentence)\n    is_male = result[\"male\"] > result[\"female\"]\n    interpretation = []\n    for word in re.split(\"( )\", sentence):\n        score = 0\n        token = word.lower()\n        if (is_male and token in male_words) or (not is_male and token in female_words):\n            score = 1\n        elif (is_male and token in female_words) or (\n            not is_male and token in male_words\n        ):\n            score = -1\n        interpretation.append((word, score))\n    # Output must be a list of lists containing the same number of elements as inputs\n    # Each element corresponds to the interpretation scores for the given input\n    return [interpretation]\n\n\ndemo = gr.Interface(\n    fn=gender_of_sentence,\n    inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n    outputs=\"label\",\n    interpretation=interpret_gender,\n)\n\ndemo.launch()\n\n
\n\n

Learn more about Interpretation in the docs.

\n\n

Custom Styling

\n\n

If you'd like to have more fine-grained control over any aspect of your demo, you can also write your own css or pass in a filepath to a css file, with the css parameter of the Interface class.

\n\n
gr.Interface(..., css=\"body {background-color: red}\")\n
\n\n

If you'd like to reference external files in your css, preface the file path (which can be a relative or absolute path) with \"file=\", for example:

\n\n
gr.Interface(..., css=\"body {background-image: url('file=clouds.jpg')}\")\n
\n\n

Warning: Custom CSS is not guaranteed to work across Gradio versions as the Gradio HTML DOM may change. We recommend using custom CSS sparingly and instead using Themes whenever possible.

\n\n

Loading Hugging Face Models and Spaces

\n\n

Gradio integrates nicely with the Hugging Face Hub, allowing you to load models and Spaces with just one line of code. To use this, simply use the load() method in the Interface class. So:

\n\n
    \n
  • To load any model from the Hugging Face Hub and create an interface around it, you pass \"models/\" or \"huggingface/\" followed by the model name, like these examples:
  • \n
\n\n
gr.Interface.load(\"huggingface/gpt2\").launch();\n
\n\n
gr.Interface.load(\"huggingface/EleutherAI/gpt-j-6B\", \n    inputs=gr.Textbox(lines=5, label=\"Input Text\")  # customizes the input component\n).launch()\n
\n\n
    \n
  • To load any Space from the Hugging Face Hub and recreate it locally (so that you can customize the inputs and outputs, for example), you pass \"spaces/\" followed by the name of the Space:
  • \n
\n\n
gr.Interface.load(\"spaces/eugenesiow/remove-bg\", \n                  inputs=\"webcam\", \n                  title=\"Remove your webcam background!\").launch()\n
\n\n

One of the great things about loading Hugging Face models or spaces using Gradio is that you can then immediately use the resulting Interface object just like a function in your Python code (this works for every type of model/space: text, images, audio, video, and even multimodal models):

\n\n
io = gr.Interface.load(\"models/EleutherAI/gpt-neo-2.7B\")\nio(\"It was the best of times\")  # outputs model completion\n
\n\n

Putting Interfaces in Parallel and Series

\n\n

Gradio also lets you mix interfaces very easily using the gradio.Parallel and gradio.Series classes. Parallel lets you put two or more similar models (as long as they have the same input type) in parallel to compare their predictions:

\n\n
generator1 = gr.Interface.load(\"huggingface/gpt2\")\ngenerator2 = gr.Interface.load(\"huggingface/EleutherAI/gpt-neo-2.7B\")\ngenerator3 = gr.Interface.load(\"huggingface/EleutherAI/gpt-j-6B\")\n\ngr.Parallel(generator1, generator2, generator3).launch()\n
\n\n

Series lets you put models and spaces in series, piping the output of one model into the input of the next model.

\n\n
generator = gr.Interface.load(\"huggingface/gpt2\")\ntranslator = gr.Interface.load(\"huggingface/t5-small\")\n\ngr.Series(generator, translator).launch()  \n# this demo generates text, then translates it to German, and outputs the final result.\n
\n\n

And of course, you can also mix Parallel and Series together whenever that makes sense!

\n\n

Learn more about Parallel and Series in the docs.

\n", "tags": [], "spaces": [], "url": "/guides/advanced-interface-features/", "contributor": null}], "parent": "gradio"}}, "components": {"annotatedimage": {"class": null, "name": "AnnotatedImage", "description": "Displays a base image and colored subsections on top of that image. Subsections can take the from of rectangles (e.g. object detection) or masks (e.g. image segmentation).
", "tags": {"preprocessing": "this component does *not* accept input.", "postprocessing": "expects a {Tuple[numpy.ndarray | PIL.Image | str, List[Tuple[numpy.ndarray | Tuple[int, int, int, int], str]]]} consisting of a base image and a list of subsections, that are either (x1, y1, x2, y2) tuples identifying object boundaries, or 0-1 confidence masks of the same shape as the image. A label is provided for each subsection.", "demos": "image_segmentation"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "value", "annotation": "tuple[np.ndarray | _Image.Image | str, list[tuple[np.ndarray | tuple[int, int, int, int], str]]] | None", "doc": "Tuple of base image and list of (subsection, label) pairs.", "default": "None"}, {"name": "show_legend", "annotation": "bool", "doc": "If True, will show a legend of the subsections.", "default": "True"}, {"name": "height", "annotation": "int | None", "doc": "Height of the displayed image.", "default": "None"}, {"name": "width", "annotation": "int | None", "doc": "Width of the displayed image.", "default": "None"}, {"name": "color_map", "annotation": "dict[str, str] | None", "doc": "A dictionary mapping labels to colors. The colors must be specified as hex codes.", "default": "None"}, {"name": "label", "annotation": "str | None", "doc": "component name in interface.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", "default": "None"}, {"name": "show_label", "annotation": "bool | None", "doc": "if True, will display label.", "default": "None"}, {"name": "container", "annotation": "bool", "doc": "If True, will place the component in a container - providing some extra padding around the border.", "default": "True"}, {"name": "scale", "annotation": "int | None", "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", "default": "None"}, {"name": "min_width", "annotation": "int", "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", "default": "160"}, {"name": "visible", "annotation": "bool", "doc": "If False, component will be hidden.", "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "elem_classes", "annotation": "list[str] | str | None", "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}], "returns": {"annotation": null}, "example": null, "fns": [{"fn": null, "name": "select", "description": "Event listener for when the user selects Image subsection. Uses event data gradio.SelectData to carry `value` referring to selected subsection label, and `index` to refer to subsection index. See EventData documentation on how to use this event data.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. 
Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.AnnotatedImage"}], "string_shortcuts": [["AnnotatedImage", "annotatedimage", "Uses default values"]], "demos": [["image_segmentation", "import gradio as gr\nimport numpy as np\nimport random\n\nwith gr.Blocks() as demo:\n section_labels = [\n \"apple\",\n \"banana\",\n \"carrot\",\n \"donut\",\n \"eggplant\",\n \"fish\",\n \"grapes\",\n \"hamburger\",\n \"ice cream\",\n \"juice\",\n ]\n\n with gr.Row():\n num_boxes = gr.Slider(0, 5, 2, step=1, label=\"Number of boxes\")\n num_segments = gr.Slider(0, 5, 1, step=1, label=\"Number of segments\")\n\n with gr.Row():\n img_input = gr.Image()\n img_output = gr.AnnotatedImage().style(\n color_map={\"banana\": \"#a89a00\", \"carrot\": \"#ffae00\"}\n )\n\n section_btn = gr.Button(\"Identify Sections\")\n selected_section = gr.Textbox(label=\"Selected Section\")\n\n def section(img, num_boxes, num_segments):\n sections = []\n for a in range(num_boxes):\n x = random.randint(0, img.shape[1])\n y = random.randint(0, img.shape[0])\n w = random.randint(0, img.shape[1] - x)\n h = random.randint(0, img.shape[0] - y)\n sections.append(((x, y, x + w, y + h), section_labels[a]))\n for b in range(num_segments):\n x = random.randint(0, img.shape[1])\n y = random.randint(0, img.shape[0])\n r = random.randint(0, min(x, y, img.shape[1] - x, img.shape[0] - y))\n mask = np.zeros(img.shape[:2])\n for i in range(img.shape[0]):\n for j in range(img.shape[1]):\n dist_square = (i - y) ** 2 + (j - x) ** 2\n if dist_square < r**2:\n mask[i, j] = round((r**2 - dist_square) / r**2 * 4) / 4\n sections.append((mask, section_labels[b + num_boxes]))\n return (img, sections)\n\n section_btn.click(section, [img_input, num_boxes, num_segments], img_output)\n\n def select_section(evt: gr.SelectData):\n return section_labels[evt.index]\n\n img_output.select(select_section, None, selected_section)\n\nif __name__ == \"__main__\":\n demo.launch()\n"]], "preprocessing": "this component does *not* accept input.", "postprocessing": "expects a Tuple[numpy.ndarray | PIL.Image | str, List[Tuple[numpy.ndarray | Tuple[int, int, int, int], str]]] consisting of a base image and a list of subsections, that are either (x1, y1, x2, y2) tuples identifying object boundaries, or 0-1 confidence masks of the same shape as the image. 
A label is provided for each subsection.", "parent": "gradio", "prev_obj": "Components", "next_obj": "Audio"}, "audio": {"class": null, "name": "Audio", "description": "Creates an audio component that can be used to upload/record audio (as an input) or display audio (as an output).", "tags": {"preprocessing": "passes the uploaded audio as a {Tuple(int, numpy.array)} corresponding to (sample rate in Hz, audio data as a 16-bit int array whose values range from -32768 to 32767), or as a {str} filepath, depending on `type`.", "postprocessing": "expects a {Tuple(int, numpy.array)} corresponding to (sample rate in Hz, audio data as a float or int numpy array) or as a {str} or {pathlib.Path} filepath or URL to an audio file, which gets displayed", "examples-format": "a {str} filepath to a local file that contains audio.", "demos": "main_note, generate_tone, reverse_audio", "guides": "real-time-speech-recognition"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "value", "annotation": "str | Path | tuple[int, np.ndarray] | Callable | None", "doc": "A path, URL, or [sample_rate, numpy array] tuple (sample rate in Hz, audio data as a float or int numpy array) for the default value that Audio component is going to take. If callable, the function will be called whenever the app loads to set the initial value of the component.", "default": "None"}, {"name": "source", "annotation": "Literal['upload', 'microphone']", "doc": "Source of audio. \"upload\" creates a box where user can drop an audio file, \"microphone\" creates a microphone input.", "default": "\"upload\""}, {"name": "type", "annotation": "Literal['numpy', 'filepath']", "doc": "The format the audio file is converted to before being passed into the prediction function. \"numpy\" converts the audio to a tuple consisting of: (int sample rate, numpy.array for the data), \"filepath\" passes a str path to a temporary file containing the audio.", "default": "\"numpy\""}, {"name": "label", "annotation": "str | None", "doc": "component name in interface.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", "default": "None"}, {"name": "show_label", "annotation": "bool | None", "doc": "if True, will display label.", "default": "None"}, {"name": "container", "annotation": "bool", "doc": "If True, will place the component in a container - providing some extra padding around the border.", "default": "True"}, {"name": "scale", "annotation": "int | None", "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", "default": "None"}, {"name": "min_width", "annotation": "int", "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", "default": "160"}, {"name": "interactive", "annotation": "bool | None", "doc": "if True, will allow users to upload and edit a audio file; if False, can only be used to play audio. 
If not provided, this is inferred based on whether the component is used as an input or output.", "default": "None"}, {"name": "visible", "annotation": "bool", "doc": "If False, component will be hidden.", "default": "True"}, {"name": "streaming", "annotation": "bool", "doc": "If set to True when used in a `live` interface, will automatically stream webcam feed. Only valid is source is 'microphone'.", "default": "False"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "elem_classes", "annotation": "list[str] | str | None", "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "format", "annotation": "Literal['wav', 'mp3']", "doc": "The file format to save audio files. Either 'wav' or 'mp3'. wav files are lossless but will tend to be larger files. mp3 files tend to be smaller. Default is wav. Applies both when this component is used as an input (when `type` is \"format\") and when this component is used as an output.", "default": "\"wav\""}, {"name": "autoplay", "annotation": "bool", "doc": "Whether to automatically play the audio when the component is used as an output. Note: browsers will not autoplay audio files if the user has not interacted with the page yet.", "default": "False"}, {"name": "show_download_button", "annotation": "", "doc": "If True, will show a download button in the corner of the component for saving audio. If False, icon does not appear.", "default": "True"}, {"name": "show_share_button", "annotation": "bool | None", "doc": "If True, will show a share icon in the corner of the component that allows user to share outputs to Hugging Face Spaces Discussions. If False, icon does not appear. If set to None (default behavior), then the icon appears if this Gradio app is launched on Spaces, but not otherwise.", "default": "None"}], "returns": {"annotation": null}, "example": null, "fns": [{"fn": null, "name": "change", "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. 
If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Audio"}, {"fn": null, "name": "clear", "description": "This listener is triggered when the user clears the component (e.g. image or audio) using the X button for the component. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. 
Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Audio"}, {"fn": null, "name": "play", "description": "This listener is triggered when the user plays the component (e.g. audio or video). This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. 
For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Audio"}, {"fn": null, "name": "pause", "description": "This listener is triggered when the media stops playing for any reason (e.g. audio or video). This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Audio"}, {"fn": null, "name": "stop", "description": "This listener is triggered when the user reaches the end of the media track (e.g. audio or video). This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Audio"}, {"fn": null, "name": "end", "description": "This listener is triggered when the user reaches the end of the media track (e.g. audio or video). This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. 
If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Audio"}, {"fn": null, "name": "stream", "description": "This listener is triggered when the user streams the component (e.g. a live webcam component). 
This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. 
Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Audio"}, {"fn": null, "name": "start_recording", "description": "This listener is triggered when the user starts recording with the component (e.g. audio or video). This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Audio"}, {"fn": null, "name": "stop_recording", "description": "This listener is triggered when the user stops recording with the component (e.g. audio or video). This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Audio"}, {"fn": null, "name": "upload", "description": "This listener is triggered when the user uploads a file into the component (e.g. when the user uploads a video into a video component). This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Audio"}], "string_shortcuts": [["Audio", "audio", "Uses default values"], ["Microphone", "microphone", "Uses source=\"microphone\""]], "demos": [["main_note", "from math import log2, pow\nimport os\n\nimport numpy as np\nfrom scipy.fftpack import fft\n\nimport gradio as gr\n\nA4 = 440\nC0 = A4 * pow(2, -4.75)\nname = [\"C\", \"C#\", \"D\", \"D#\", \"E\", \"F\", \"F#\", \"G\", \"G#\", \"A\", \"A#\", \"B\"]\n\n\ndef get_pitch(freq):\n h = round(12 * log2(freq / C0))\n n = h % 12\n return name[n]\n\n\ndef main_note(audio):\n rate, y = audio\n if len(y.shape) == 2:\n y = y.T[0]\n N = len(y)\n T = 1.0 / rate\n yf = fft(y)\n yf2 = 2.0 / N * np.abs(yf[0 : N // 2])\n xf = np.linspace(0.0, 1.0 / (2.0 * T), N // 2)\n\n volume_per_pitch = {}\n total_volume = np.sum(yf2)\n for freq, volume in zip(xf, yf2):\n if freq == 0:\n continue\n pitch = get_pitch(freq)\n if pitch not in volume_per_pitch:\n volume_per_pitch[pitch] = 0\n volume_per_pitch[pitch] += 1.0 * volume / total_volume\n volume_per_pitch = {k: float(v) for k, v in volume_per_pitch.items()}\n return volume_per_pitch\n\n\ndemo = gr.Interface(\n main_note,\n gr.Audio(source=\"microphone\"),\n gr.Label(num_top_classes=4),\n examples=[\n [os.path.join(os.path.dirname(__file__),\"audio/recording1.wav\")],\n [os.path.join(os.path.dirname(__file__),\"audio/cantina.wav\")],\n ],\n interpretation=\"default\",\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n"], ["generate_tone", "import numpy as np\nimport gradio as gr\n\nnotes = [\"C\", \"C#\", \"D\", \"D#\", \"E\", \"F\", \"F#\", \"G\", \"G#\", \"A\", \"A#\", \"B\"]\n\ndef generate_tone(note, octave, duration):\n sr = 48000\n a4_freq, tones_from_a4 = 440, 12 * (octave - 4) + (note - 9)\n frequency = a4_freq * 2 ** (tones_from_a4 / 12)\n duration = int(duration)\n audio = np.linspace(0, duration, duration * sr)\n audio = (20000 * np.sin(audio * (2 * np.pi * frequency))).astype(np.int16)\n return sr, audio\n\ndemo = gr.Interface(\n generate_tone,\n [\n gr.Dropdown(notes, type=\"index\"),\n gr.Slider(4, 6, step=1),\n gr.Textbox(value=1, label=\"Duration in seconds\"),\n ],\n \"audio\",\n)\nif __name__ == \"__main__\":\n demo.launch()\n"], ["reverse_audio", "import os\n\nimport numpy as np\n\nimport gradio as gr\n\n\ndef reverse_audio(audio):\n sr, data = audio\n return (sr, np.flipud(data))\n\n\ndemo = gr.Interface(fn=reverse_audio, \n inputs=\"microphone\", \n outputs=\"audio\", \n examples=[\n \"https://samplelib.com/lib/preview/mp3/sample-3s.mp3\",\n os.path.join(os.path.dirname(__file__), \"audio/recording1.wav\")\n ], cache_examples=True)\n\nif __name__ == \"__main__\":\n demo.launch()\n"]], "guides": [{"name": "real-time-speech-recognition", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 40, "pretty_name": "Real Time Speech Recognition", "content": "# Real Time Speech Recognition \n\n\n\n\n## Introduction\n\nAutomatic speech recognition (ASR), the conversion of spoken speech to text, is a very important and thriving area of machine learning. ASR algorithms run on practically every smartphone, and are becoming increasingly embedded in professional workflows, such as digital assistants for nurses and doctors. 
Because ASR algorithms are designed to be used directly by customers and end users, it is important to validate that they are behaving as expected when confronted with a wide variety of speech patterns (different accents, pitches, and background audio conditions).\n\nUsing `gradio`, you can easily build a demo of your ASR model and share that with a testing team, or test it yourself by speaking through the microphone on your device.\n\nThis tutorial will show how to take a pretrained speech-to-text model and deploy it with a Gradio interface. We will start with a ***full-context*** model, in which the user speaks the entire audio before the prediction runs. Then we will adapt the demo to make it ***streaming***, meaning that the audio model will convert speech as you speak. The streaming demo that we create will look something like this (try it below or [in a new tab](https://huggingface.co/spaces/abidlabs/streaming-asr-paused)!):\n\n\n\nReal-time ASR is inherently *stateful*, meaning that the model's predictions change depending on what words the user previously spoke. So, in this tutorial, we will also cover how to use **state** with Gradio demos. \n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). You will also need a pretrained speech recognition model. In this tutorial, we will build demos from 2 ASR libraries:\n\n* Transformers (for this, `pip install transformers` and `pip install torch`) \n* DeepSpeech (`pip install deepspeech==0.8.2`)\n\nMake sure you have at least one of these installed so that you can follow along the tutorial. You will also need `ffmpeg` [installed on your system](https://www.ffmpeg.org/download.html), if you do not already have it, to process files from the microphone.\n\nHere's how to build a real time speech recognition (ASR) app: \n\n1. [Set up the Transformers ASR Model](#1-set-up-the-transformers-asr-model)\n2. [Create a Full-Context ASR Demo with Transformers](#2-create-a-full-context-asr-demo-with-transformers) \n3. [Create a Streaming ASR Demo with Transformers](#3-create-a-streaming-asr-demo-with-transformers)\n4. [Create a Streaming ASR Demo with DeepSpeech](#4-create-a-streaming-asr-demo-with-deep-speech)\n\n\n## 1. Set up the Transformers ASR Model\n\nFirst, you will need to have an ASR model that you have either trained yourself or you will need to download a pretrained model. In this tutorial, we will start by using a pretrained ASR model from the Hugging Face model, `Wav2Vec2`. \n\nHere is the code to load `Wav2Vec2` from Hugging Face `transformers`.\n\n```python\nfrom transformers import pipeline\n\np = pipeline(\"automatic-speech-recognition\")\n```\n\nThat's it! By default, the automatic speech recognition model pipeline loads Facebook's `facebook/wav2vec2-base-960h` model.\n\n## 2. Create a Full-Context ASR Demo with Transformers \n\nWe will start by creating a *full-context* ASR demo, in which the user speaks the full audio before using the ASR model to run inference. This is very easy with Gradio -- we simply create a function around the `pipeline` object above.\n\nWe will use `gradio`'s built in `Audio` component, configured to take input from the user's microphone and return a filepath for the recorded audio. 
The output component will be a plain `Textbox`.\n\n```python\nimport gradio as gr\n\ndef transcribe(audio):\n text = p(audio)[\"text\"]\n return text\n\ngr.Interface(\n fn=transcribe, \n inputs=gr.Audio(source=\"microphone\", type=\"filepath\"), \n outputs=\"text\").launch()\n```\n\nSo what's happening here? The `transcribe` function takes a single parameter, `audio`, which is a filepath to the audio file that the user has recorded. The `pipeline` object expects a filepath and converts it to text, which is returned to the frontend and displayed in a textbox. \n\nLet's see it in action! (Record a short audio clip and then click submit, or [open in a new tab](https://huggingface.co/spaces/abidlabs/full-context-asr)):\n\n\n\n## 3. Create a Streaming ASR Demo with Transformers\n\nOk great! We've built an ASR model that works well for short audio clips. However, if you are recording longer audio clips, you probably want a *streaming* interface, one that transcribes audio as the user speaks instead of just all-at-once at the end.\n\nThe good news is that it's not too difficult to adapt the demo we just made to make it streaming, using the same `Wav2Vec2` model. \n\nThe biggest change is that we must now introduce a `state` parameter, which holds the audio that has been *transcribed so far*. This allows us to only the latest chunk of audio and simply append it to the audio we previously transcribed. \n\nWhen adding state to a Gradio demo, you need to do a total of 3 things:\n\n* Add a `state` parameter to the function\n* Return the updated `state` at the end of the function\n* Add the `\"state\"` components to the `inputs` and `outputs` in `Interface` \n\nHere's what the code looks like:\n\n```python\ndef transcribe(audio, state=\"\"):\n text = p(audio)[\"text\"]\n state += text + \" \"\n return state, state\n\n# Set the starting state to an empty string\n\ngr.Interface(\n fn=transcribe, \n inputs=[\n gr.Audio(source=\"microphone\", type=\"filepath\", streaming=True), \n \"state\" \n ],\n outputs=[\n \"textbox\",\n \"state\"\n ],\n live=True).launch()\n```\n\nNotice that we've also made one other change, which is that we've set `live=True`. This keeps the Gradio interface running constantly, so it automatically transcribes audio without the user having to repeatedly hit the submit button.\n\nLet's see how it does (try below or [in a new tab](https://huggingface.co/spaces/abidlabs/streaming-asr))!\n\n\n\n\nOne thing that you may notice is that the transcription quality has dropped since the chunks of audio are so small, they lack the context to properly be transcribed. A \"hacky\" fix to this is to simply increase the runtime of the `transcribe()` function so that longer audio chunks are processed. We can do this by adding a `time.sleep()` inside the function, as shown below (we'll see a proper fix next) \n\n```python\nfrom transformers import pipeline\nimport gradio as gr\nimport time\n\np = pipeline(\"automatic-speech-recognition\")\n\ndef transcribe(audio, state=\"\"):\n time.sleep(2)\n text = p(audio)[\"text\"]\n state += text + \" \"\n return state, state\n\ngr.Interface(\n fn=transcribe, \n inputs=[\n gr.Audio(source=\"microphone\", type=\"filepath\", streaming=True), \n \"state\"\n ],\n outputs=[\n \"textbox\",\n \"state\"\n ],\n live=True).launch()\n```\n\nTry the demo below to see the difference (or [open in a new tab](https://huggingface.co/spaces/abidlabs/streaming-asr-paused))!\n\n\n\n\n## 4. 
Create a Streaming ASR Demo with DeepSpeech\n\nYou're not restricted to ASR models from the `transformers` library -- you can use your own models or models from other libraries. The `DeepSpeech` library contains models that are specifically designed to handle streaming audio data. These models perform really well with streaming data as they are able to account for previous chunks of audio data when making predictions.\n\nGoing through the DeepSpeech library is beyond the scope of this Guide (check out their [excellent documentation here](https://deepspeech.readthedocs.io/en/r0.9/)), but you can use Gradio very similarly with a DeepSpeech ASR model as with a Transformers ASR model. \n\nHere's a complete example (on Linux):\n\nFirst install the DeepSpeech library and download the pretrained models from the terminal:\n\n```bash\nwget https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-models.pbmm\nwget https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-models.scorer\napt install libasound2-dev portaudio19-dev libportaudio2 libportaudiocpp0 ffmpeg\npip install deepspeech==0.8.2\n```\n\nThen, create a similar `transcribe()` function as before:\n\n```python\nfrom deepspeech import Model\nimport numpy as np\n\nmodel_file_path = \"deepspeech-0.8.2-models.pbmm\"\nlm_file_path = \"deepspeech-0.8.2-models.scorer\"\nbeam_width = 100\nlm_alpha = 0.93\nlm_beta = 1.18\n\nmodel = Model(model_file_path)\nmodel.enableExternalScorer(lm_file_path)\nmodel.setScorerAlphaBeta(lm_alpha, lm_beta)\nmodel.setBeamWidth(beam_width)\n\n\ndef reformat_freq(sr, y):\n if sr not in (\n 48000,\n 16000,\n ): # Deepspeech only supports 16k, (we convert 48k -> 16k)\n raise ValueError(\"Unsupported rate\", sr)\n if sr == 48000:\n y = (\n ((y / max(np.max(y), 1)) * 32767)\n .reshape((-1, 3))\n .mean(axis=1)\n .astype(\"int16\")\n )\n sr = 16000\n return sr, y\n\n\ndef transcribe(speech, stream):\n _, y = reformat_freq(*speech)\n if stream is None:\n stream = model.createStream()\n stream.feedAudioContent(y)\n text = stream.intermediateDecode()\n return text, stream\n\n```\n\nThen, create a Gradio Interface as before (the only difference being that the return type should be `numpy` instead of a `filepath` to be compatible with the DeepSpeech models)\n\n```python\nimport gradio as gr\n\ngr.Interface(\n fn=transcribe, \n inputs=[\n gr.Audio(source=\"microphone\", type=\"numpy\"), \n \"state\" \n ], \n outputs= [\n \"text\", \n \"state\"\n ], \n live=True).launch()\n```\n\nRunning all of this should allow you to deploy your realtime ASR model with a nice GUI. Try it out and see how well it works for you.\n\n--------------------------------------------\n\n\nAnd you're done! That's all the code you need to build a web-based GUI for your ASR model. \n\nFun tip: you can share your ASR model instantly with others simply by setting `share=True` in `launch()`. \n\n\n", "html": "

Real Time Speech Recognition

\n\n

Introduction

\n\n

Automatic speech recognition (ASR), the conversion of spoken speech to text, is a very important and thriving area of machine learning. ASR algorithms run on practically every smartphone, and are becoming increasingly embedded in professional workflows, such as digital assistants for nurses and doctors. Because ASR algorithms are designed to be used directly by customers and end users, it is important to validate that they are behaving as expected when confronted with a wide variety of speech patterns (different accents, pitches, and background audio conditions).

\n\n

Using gradio, you can easily build a demo of your ASR model and share that with a testing team, or test it yourself by speaking through the microphone on your device.

\n\n

This tutorial will show how to take a pretrained speech-to-text model and deploy it with a Gradio interface. We will start with a full-context model, in which the user speaks the entire audio before the prediction runs. Then we will adapt the demo to make it streaming, meaning that the audio model will convert speech as you speak. The streaming demo that we create will look something like this (try it below or in a new tab!):

\n\n\n\n

Real-time ASR is inherently stateful, meaning that the model's predictions change depending on what words the user previously spoke. So, in this tutorial, we will also cover how to use state with Gradio demos.

\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed. You will also need a pretrained speech recognition model. In this tutorial, we will build demos from 2 ASR libraries:

\n\n
    \n
  • Transformers (for this, pip install transformers and pip install torch)
  • \n
  • DeepSpeech (pip install deepspeech==0.8.2)
  • \n
\n\n

Make sure you have at least one of these installed so that you can follow along with this tutorial. You will also need ffmpeg installed on your system, if you do not already have it, to process audio files from the microphone.

\n\n

Here's how to build a real-time speech recognition (ASR) app:

\n\n
    \n
  1. Set up the Transformers ASR Model
  2. \n
  3. Create a Full-Context ASR Demo with Transformers
  4. \n
  5. Create a Streaming ASR Demo with Transformers
  6. \n
  7. Create a Streaming ASR Demo with DeepSpeech
  8. \n
\n\n

1. Set up the Transformers ASR Model

\n\n

First, you will need an ASR model, either one that you have trained yourself or a pretrained model that you download. In this tutorial, we will start with a pretrained ASR model from the Hugging Face Hub, Wav2Vec2.

\n\n

Here is the code to load Wav2Vec2 from Hugging Face transformers.

\n\n
from transformers import pipeline\n\np = pipeline(\"automatic-speech-recognition\")\n
\n\n

That's it! By default, the automatic speech recognition model pipeline loads Facebook's facebook/wav2vec2-base-960h model.

\n\n
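If you would rather pin the exact checkpoint than rely on the pipeline default, you can pass the model name explicitly (a minimal sketch using the same default checkpoint named above):
\n\n
from transformers import pipeline\n\n# Explicitly request the checkpoint that the default pipeline would otherwise load\np = pipeline(\"automatic-speech-recognition\", model=\"facebook/wav2vec2-base-960h\")\n
\n\n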

2. Create a Full-Context ASR Demo with Transformers

\n\n

We will start by creating a full-context ASR demo, in which the user speaks the full audio before using the ASR model to run inference. This is very easy with Gradio -- we simply create a function around the pipeline object above.

\n\n

We will use Gradio's built-in Audio component, configured to take input from the user's microphone and return a filepath for the recorded audio. The output component will be a plain Textbox.

\n\n
import gradio as gr\n\ndef transcribe(audio):\n    text = p(audio)[\"text\"]\n    return text\n\ngr.Interface(\n    fn=transcribe, \n    inputs=gr.Audio(source=\"microphone\", type=\"filepath\"), \n    outputs=\"text\").launch()\n
\n\n

So what's happening here? The transcribe function takes a single parameter, audio, which is a filepath to the audio file that the user has recorded. The pipeline object expects a filepath and converts it to text, which is returned to the frontend and displayed in a textbox.

\n\n
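To make the pipeline's role concrete, here is a minimal sketch of calling it directly on a recorded file (recording.wav is a placeholder filename, not a file shipped with this guide):
\n\n
from transformers import pipeline\n\np = pipeline(\"automatic-speech-recognition\")\n\n# The pipeline accepts a filepath directly, just like the transcribe() wrapper above\nprint(p(\"recording.wav\")[\"text\"])\n
\n\n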

Let's see it in action! (Record a short audio clip and then click submit, or open in a new tab):

\n\n\n\n

3. Create a Streaming ASR Demo with Transformers

\n\n

Ok great! We've built an ASR model that works well for short audio clips. However, if you are recording longer audio clips, you probably want a streaming interface, one that transcribes audio as the user speaks instead of all at once at the end.

\n\n

The good news is that it's not too difficult to adapt the demo we just made to make it streaming, using the same Wav2Vec2 model.

\n\n

The biggest change is that we must now introduce a state parameter, which holds the text that has been transcribed so far. This allows us to transcribe only the latest chunk of audio and simply append it to the text we previously transcribed.

\n\n

When adding state to a Gradio demo, you need to do a total of 3 things:

\n\n
    \n
  • Add a state parameter to the function
  • \n
  • Return the updated state at the end of the function
  • \n
  • Add the \"state\" components to the inputs and outputs in Interface
  • \n
\n\n

Here's what the code looks like:

\n\n
def transcribe(audio, state=\"\"):\n    text = p(audio)[\"text\"]\n    state += text + \" \"\n    return state, state\n\n# Set the starting state to an empty string\n\ngr.Interface(\n    fn=transcribe, \n    inputs=[\n        gr.Audio(source=\"microphone\", type=\"filepath\", streaming=True), \n        \"state\" \n    ],\n    outputs=[\n        \"textbox\",\n        \"state\"\n    ],\n    live=True).launch()\n
\n\n

Notice that we've also made one other change, which is that we've set live=True. This keeps the Gradio interface running constantly, so it automatically transcribes audio without the user having to repeatedly hit the submit button.

\n\n

Let's see how it does (try below or in a new tab)!

\n\n\n\n

One thing that you may notice is that the transcription quality has dropped: since the chunks of audio are so small, they lack the context to be properly transcribed. A \"hacky\" fix for this is to simply increase the runtime of the transcribe() function so that longer audio chunks are processed. We can do this by adding a time.sleep() inside the function, as shown below (we'll see a proper fix next).

\n\n
from transformers import pipeline\nimport gradio as gr\nimport time\n\np = pipeline(\"automatic-speech-recognition\")\n\ndef transcribe(audio, state=\"\"):\n    time.sleep(2)\n    text = p(audio)[\"text\"]\n    state += text + \" \"\n    return state, state\n\ngr.Interface(\n    fn=transcribe, \n    inputs=[\n        gr.Audio(source=\"microphone\", type=\"filepath\", streaming=True), \n        \"state\"\n    ],\n    outputs=[\n        \"textbox\",\n        \"state\"\n    ],\n    live=True).launch()\n
\n\n

Try the demo below to see the difference (or open in a new tab)!

\n\n\n\n

4. Create a Streaming ASR Demo with DeepSpeech

\n\n

You're not restricted to ASR models from the transformers library -- you can use your own models or models from other libraries. The DeepSpeech library contains models that are specifically designed to handle streaming audio data. These models perform really well with streaming data as they are able to account for previous chunks of audio data when making predictions.

\n\n

Going through the DeepSpeech library is beyond the scope of this Guide (check out their excellent documentation here), but you can use Gradio very similarly with a DeepSpeech ASR model as with a Transformers ASR model.

\n\n

Here's a complete example (on Linux):

\n\n

First install the DeepSpeech library and download the pretrained models from the terminal:

\n\n
wget https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-models.pbmm\nwget https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-models.scorer\napt install libasound2-dev portaudio19-dev libportaudio2 libportaudiocpp0 ffmpeg\npip install deepspeech==0.8.2\n
\n\n

Then, create a similar transcribe() function as before:

\n\n
from deepspeech import Model\nimport numpy as np\n\nmodel_file_path = \"deepspeech-0.8.2-models.pbmm\"\nlm_file_path = \"deepspeech-0.8.2-models.scorer\"\nbeam_width = 100\nlm_alpha = 0.93\nlm_beta = 1.18\n\nmodel = Model(model_file_path)\nmodel.enableExternalScorer(lm_file_path)\nmodel.setScorerAlphaBeta(lm_alpha, lm_beta)\nmodel.setBeamWidth(beam_width)\n\n\ndef reformat_freq(sr, y):\n    if sr not in (\n        48000,\n        16000,\n    ):  # Deepspeech only supports 16k, (we convert 48k -> 16k)\n        raise ValueError(\"Unsupported rate\", sr)\n    if sr == 48000:\n        y = (\n            ((y / max(np.max(y), 1)) * 32767)\n            .reshape((-1, 3))\n            .mean(axis=1)\n            .astype(\"int16\")\n        )\n        sr = 16000\n    return sr, y\n\n\ndef transcribe(speech, stream):\n    _, y = reformat_freq(*speech)\n    if stream is None:\n        stream = model.createStream()\n    stream.feedAudioContent(y)\n    text = stream.intermediateDecode()\n    return text, stream\n\n
\n\n

Then, create a Gradio Interface as before (the only difference being that the type of the Audio input should be numpy instead of filepath, to be compatible with the DeepSpeech model).

\n\n
import gradio as gr\n\ngr.Interface(\n    fn=transcribe, \n    inputs=[\n        gr.Audio(source=\"microphone\", type=\"numpy\"), \n        \"state\" \n    ], \n    outputs= [\n        \"text\", \n        \"state\"\n    ], \n    live=True).launch()\n
\n\n

Running all of this should allow you to deploy your real-time ASR model with a nice GUI. Try it out and see how well it works for you.

\n\n
\n\n

And you're done! That's all the code you need to build a web-based GUI for your ASR model.

\n\n

Fun tip: you can share your ASR model instantly with others simply by setting share=True in launch().

\n", "tags": ["ASR", "SPEECH", "STREAMING"], "spaces": ["https://huggingface.co/spaces/abidlabs/streaming-asr-paused", "https://huggingface.co/spaces/abidlabs/full-context-asr"], "url": "/guides/real-time-speech-recognition/", "contributor": null}], "preprocessing": "passes the uploaded audio as a Tuple(int, numpy.array) corresponding to (sample rate in Hz, audio data as a 16-bit int array whose values range from -32768 to 32767), or as a str filepath, depending on `type`.", "postprocessing": "expects a Tuple(int, numpy.array) corresponding to (sample rate in Hz, audio data as a float or int numpy array) or as a str or pathlib.Path filepath or URL to an audio file, which gets displayed", "examples-format": "a str filepath to a local file that contains audio.", "parent": "gradio", "prev_obj": "AnnotatedImage", "next_obj": "BarPlot"}, "barplot": {"class": null, "name": "BarPlot", "description": "Create a bar plot.

", "tags": {"preprocessing": "this component does *not* accept input.", "postprocessing": "expects a pandas dataframe with the data to plot.", "demos": "bar_plot, chicago-bikeshare-dashboard"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "value", "annotation": "pd.DataFrame | Callable | None", "doc": "The pandas dataframe containing the data to display in a scatter plot.", "default": "None"}, {"name": "x", "annotation": "str | None", "doc": "Column corresponding to the x axis.", "default": "None"}, {"name": "y", "annotation": "str | None", "doc": "Column corresponding to the y axis.", "default": "None"}, {"name": "color", "annotation": "str | None", "doc": "The column to determine the bar color. Must be categorical (discrete values).", "default": "None"}, {"name": "vertical", "annotation": "bool", "doc": "If True, the bars will be displayed vertically. If False, the x and y axis will be switched, displaying the bars horizontally. Default is True.", "default": "True"}, {"name": "group", "annotation": "str | None", "doc": "The column with which to split the overall plot into smaller subplots.", "default": "None"}, {"name": "title", "annotation": "str | None", "doc": "The title to display on top of the chart.", "default": "None"}, {"name": "tooltip", "annotation": "list[str] | str | None", "doc": "The column (or list of columns) to display on the tooltip when a user hovers over a bar.", "default": "None"}, {"name": "x_title", "annotation": "str | None", "doc": "The title given to the x axis. By default, uses the value of the x parameter.", "default": "None"}, {"name": "y_title", "annotation": "str | None", "doc": "The title given to the y axis. By default, uses the value of the y parameter.", "default": "None"}, {"name": "color_legend_title", "annotation": "str | None", "doc": "The title given to the color legend. By default, uses the value of color parameter.", "default": "None"}, {"name": "group_title", "annotation": "str | None", "doc": "The label displayed on top of the subplot columns (or rows if vertical=True). Use an empty string to omit.", "default": "None"}, {"name": "color_legend_position", "annotation": "Literal['left', 'right', 'top', 'bottom', 'top-left', 'top-right', 'bottom-left', 'bottom-right', 'none'] | None", "doc": "The position of the color legend. If the string value 'none' is passed, this legend is omitted. 
For other valid position values see: https://vega.github.io/vega/docs/legends/#orientation.", "default": "None"}, {"name": "height", "annotation": "int | None", "doc": "The height of the plot in pixels.", "default": "None"}, {"name": "width", "annotation": "int | None", "doc": "The width of the plot in pixels.", "default": "None"}, {"name": "y_lim", "annotation": "list[int] | None", "doc": "A tuple of list containing the limits for the y-axis, specified as [y_min, y_max].", "default": "None"}, {"name": "caption", "annotation": "str | None", "doc": "The (optional) caption to display below the plot.", "default": "None"}, {"name": "interactive", "annotation": "bool | None", "doc": "Whether users should be able to interact with the plot by panning or zooming with their mouse or trackpad.", "default": "True"}, {"name": "label", "annotation": "str | None", "doc": "The (optional) label to display on the top left corner of the plot.", "default": "None"}, {"name": "show_label", "annotation": "bool | None", "doc": "Whether the label should be displayed.", "default": "None"}, {"name": "container", "annotation": "bool", "doc": null, "default": "True"}, {"name": "scale", "annotation": "int | None", "doc": null, "default": "None"}, {"name": "min_width", "annotation": "int", "doc": null, "default": "160"}, {"name": "every", "annotation": "float | None", "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", "default": "None"}, {"name": "visible", "annotation": "bool", "doc": "Whether the plot should be visible.", "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "elem_classes", "annotation": "list[str] | str | None", "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}], "returns": {"annotation": null}, "example": null, "fns": [{"fn": null, "name": "change", "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. 
If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.BarPlot"}, {"fn": null, "name": "clear", "description": "This listener is triggered when the user clears the component (e.g. image or audio) using the X button for the component. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. 
Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.BarPlot"}], "string_shortcuts": [["BarPlot", "barplot", "Uses default values"]], "demos": [["bar_plot", "import gradio as gr\nimport pandas as pd\nimport random\n\nsimple = pd.DataFrame(\n {\n \"a\": [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\"],\n \"b\": [28, 55, 43, 91, 81, 53, 19, 87, 52],\n }\n)\n\nfake_barley = pd.DataFrame(\n {\n \"site\": [\n random.choice(\n [\n \"University Farm\",\n \"Waseca\",\n \"Morris\",\n \"Crookston\",\n \"Grand Rapids\",\n \"Duluth\",\n ]\n )\n for _ in range(120)\n ],\n \"yield\": [random.randint(25, 75) for _ in range(120)],\n \"variety\": [\n random.choice(\n [\n \"Manchuria\",\n \"Wisconsin No. 38\",\n \"Glabron\",\n \"No. 457\",\n \"No. 462\",\n \"No. 475\",\n ]\n )\n for _ in range(120)\n ],\n \"year\": [\n random.choice(\n [\n \"1931\",\n \"1932\",\n ]\n )\n for _ in range(120)\n ],\n }\n)\n\n\ndef bar_plot_fn(display):\n if display == \"simple\":\n return gr.BarPlot.update(\n simple,\n x=\"a\",\n y=\"b\",\n title=\"Simple Bar Plot with made up data\",\n tooltip=[\"a\", \"b\"],\n y_lim=[20, 100],\n )\n elif display == \"stacked\":\n return gr.BarPlot.update(\n fake_barley,\n x=\"variety\",\n y=\"yield\",\n color=\"site\",\n title=\"Barley Yield Data\",\n tooltip=[\"variety\", \"site\"],\n )\n elif display == \"grouped\":\n return gr.BarPlot.update(\n fake_barley.astype({\"year\": str}),\n x=\"year\",\n y=\"yield\",\n color=\"year\",\n group=\"site\",\n title=\"Barley Yield by Year and Site\",\n group_title=\"\",\n tooltip=[\"yield\", \"site\", \"year\"],\n )\n elif display == \"simple-horizontal\":\n return gr.BarPlot.update(\n simple,\n x=\"a\",\n y=\"b\",\n x_title=\"Variable A\",\n y_title=\"Variable B\",\n title=\"Simple Bar Plot with made up data\",\n tooltip=[\"a\", \"b\"],\n vertical=False,\n y_lim=[20, 100],\n )\n elif display == \"stacked-horizontal\":\n return gr.BarPlot.update(\n fake_barley,\n x=\"variety\",\n y=\"yield\",\n color=\"site\",\n title=\"Barley Yield Data\",\n vertical=False,\n tooltip=[\"variety\", \"site\"],\n )\n elif display == \"grouped-horizontal\":\n return gr.BarPlot.update(\n fake_barley.astype({\"year\": str}),\n x=\"year\",\n y=\"yield\",\n color=\"year\",\n group=\"site\",\n title=\"Barley Yield by Year and Site\",\n group_title=\"\",\n tooltip=[\"yield\", \"site\", \"year\"],\n vertical=False,\n )\n\n\nwith gr.Blocks() as bar_plot:\n with gr.Row():\n with gr.Column():\n display = gr.Dropdown(\n choices=[\n \"simple\",\n \"stacked\",\n \"grouped\",\n \"simple-horizontal\",\n \"stacked-horizontal\",\n \"grouped-horizontal\",\n ],\n value=\"simple\",\n label=\"Type of Bar Plot\",\n )\n with gr.Column():\n plot = gr.BarPlot()\n display.change(bar_plot_fn, inputs=display, outputs=plot)\n bar_plot.load(fn=bar_plot_fn, inputs=display, outputs=plot)\n\nbar_plot.launch()\n"], ["chicago-bikeshare-dashboard", "import os\nimport gradio as gr\nimport pandas as pd\n\nDB_USER = os.getenv(\"DB_USER\")\nDB_PASSWORD = os.getenv(\"DB_PASSWORD\")\nDB_HOST = os.getenv(\"DB_HOST\")\nPORT = 8080\nDB_NAME = \"bikeshare\"\n\nconnection_string = (\n f\"postgresql://{DB_USER}:{DB_PASSWORD}@{DB_HOST}?port={PORT}&dbname={DB_NAME}\"\n)\n\n\ndef get_count_ride_type():\n df = pd.read_sql(\n \"\"\"\n SELECT COUNT(ride_id) as n, rideable_type\n FROM rides\n GROUP BY rideable_type\n ORDER BY n DESC\n \"\"\",\n con=connection_string,\n )\n return df\n\n\ndef get_most_popular_stations():\n\n df = 
pd.read_sql(\n \"\"\"\n SELECT COUNT(ride_id) as n, MAX(start_station_name) as station\n FROM RIDES\n WHERE start_station_name is NOT NULL\n GROUP BY start_station_id\n ORDER BY n DESC\n LIMIT 5\n \"\"\",\n con=connection_string,\n )\n return df\n\n\nwith gr.Blocks() as demo:\n gr.Markdown(\n \"\"\"\n # Chicago Bike Share Dashboard\n \n This demo pulls Chicago bike share data for March 2022 from a postgresql database hosted on AWS.\n This demo uses psycopg2 but any postgresql client library (SQLAlchemy)\n is compatible with gradio.\n \n Connection credentials are handled by environment variables\n defined as secrets in the Space.\n\n If data were added to the database, the plots in this demo would update\n whenever the webpage is reloaded.\n \n This demo serves as a starting point for your database-connected apps!\n \"\"\"\n )\n with gr.Row():\n bike_type = gr.BarPlot(\n x=\"rideable_type\",\n y='n',\n title=\"Number of rides per bicycle type\",\n y_title=\"Number of Rides\",\n x_title=\"Bicycle Type\",\n vertical=False,\n tooltip=['rideable_type', \"n\"],\n height=300,\n width=300,\n )\n station = gr.BarPlot(\n x='station',\n y='n',\n title=\"Most Popular Stations\",\n y_title=\"Number of Rides\",\n x_title=\"Station Name\",\n vertical=False,\n tooltip=['station', 'n'],\n height=300,\n width=300\n )\n\n demo.load(get_count_ride_type, inputs=None, outputs=bike_type)\n demo.load(get_most_popular_stations, inputs=None, outputs=station)\n\nif __name__ == \"__main__\":\n demo.launch()\n"]], "preprocessing": "this component does *not* accept input.", "postprocessing": "expects a pandas dataframe with the data to plot.", "parent": "gradio", "prev_obj": "Audio", "next_obj": "Button"}, "button": {"class": null, "name": "Button", "description": "Used to create a button, that can be assigned arbitrary click() events. The label (value) of the button can be used as an input or set via the output of a function.
", "tags": {"preprocessing": "passes the button value as a {str} into the function", "postprocessing": "expects a {str} to be returned from a function, which is set as the label of the button", "demos": "blocks_inputs, blocks_kinematics"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "value", "annotation": "str | Callable", "doc": "Default text for the button to display. If callable, the function will be called whenever the app loads to set the initial value of the component.", "default": "\"Run\""}, {"name": "variant", "annotation": "Literal['primary', 'secondary', 'stop']", "doc": "'primary' for main call-to-action, 'secondary' for a more subdued style, 'stop' for a stop button.", "default": "\"secondary\""}, {"name": "size", "annotation": "Literal['sm', 'lg'] | None", "doc": "Size of the button. Can be \"sm\" or \"lg\".", "default": "None"}, {"name": "icon", "annotation": "str | None", "doc": "URL or path to the icon file to display within the button. If None, no icon will be displayed.", "default": "None"}, {"name": "link", "annotation": "str | None", "doc": "URL to open when the button is clicked. If None, no link will be used.", "default": "None"}, {"name": "visible", "annotation": "bool", "doc": "If False, component will be hidden.", "default": "True"}, {"name": "interactive", "annotation": "bool", "doc": "If False, the Button will be in a disabled state.", "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "elem_classes", "annotation": "list[str] | str | None", "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "scale", "annotation": "int | None", "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", "default": "None"}, {"name": "min_width", "annotation": "int | None", "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", "default": "None"}], "returns": {"annotation": null}, "example": null, "fns": [{"fn": null, "name": "click", "description": "This listener is triggered when the component (e.g. a button) is clicked. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. 
If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Button"}], "string_shortcuts": [["Button", "button", "Uses default values"], ["ClearButton", "clearbutton", "Uses default values"], ["DuplicateButton", "duplicatebutton", "Uses default values"]], "demos": [["blocks_inputs", "import gradio as gr\nimport os\n\n\ndef combine(a, b):\n return a + \" \" + b\n\n\ndef mirror(x):\n return x\n\n\nwith gr.Blocks() as demo:\n\n txt = gr.Textbox(label=\"Input\", lines=2)\n txt_2 = gr.Textbox(label=\"Input 2\")\n txt_3 = gr.Textbox(value=\"\", label=\"Output\")\n btn = gr.Button(value=\"Submit\")\n btn.click(combine, inputs=[txt, txt_2], outputs=[txt_3])\n\n with gr.Row():\n im = gr.Image()\n im_2 = gr.Image()\n\n btn = gr.Button(value=\"Mirror Image\")\n btn.click(mirror, inputs=[im], outputs=[im_2])\n\n gr.Markdown(\"## Text Examples\")\n gr.Examples(\n [[\"hi\", \"Adam\"], [\"hello\", \"Eve\"]],\n [txt, txt_2],\n txt_3,\n combine,\n cache_examples=True,\n )\n gr.Markdown(\"## Image Examples\")\n gr.Examples(\n examples=[os.path.join(os.path.dirname(__file__), \"lion.jpg\")],\n inputs=im,\n outputs=im_2,\n fn=mirror,\n cache_examples=True,\n )\n\nif __name__ == \"__main__\":\n demo.launch()\n"], ["blocks_kinematics", "import pandas as pd\nimport numpy as np\n\nimport gradio as gr\n\n\ndef plot(v, a):\n g = 9.81\n theta = a / 180 * 3.14\n tmax = ((2 * v) * np.sin(theta)) / g\n timemat = tmax * np.linspace(0, 1, 40)\n\n x = (v * timemat) * np.cos(theta)\n y = ((v * timemat) * np.sin(theta)) - ((0.5 * g) * (timemat**2))\n df = pd.DataFrame({\"x\": x, \"y\": y})\n return df\n\n\ndemo = gr.Blocks()\n\nwith demo:\n gr.Markdown(\n r\"Let's do some kinematics! Choose the speed and angle to see the trajectory. Remember that the range $R = v_0^2 \\cdot \\frac{\\sin(2\\theta)}{g}$\"\n )\n\n with gr.Row():\n speed = gr.Slider(1, 30, 25, label=\"Speed\")\n angle = gr.Slider(0, 90, 45, label=\"Angle\")\n output = gr.LinePlot(\n x=\"x\",\n y=\"y\",\n overlay_point=True,\n tooltip=[\"x\", \"y\"],\n x_lim=[0, 100],\n y_lim=[0, 60],\n width=350,\n height=300,\n )\n btn = gr.Button(value=\"Run\")\n btn.click(plot, [speed, angle], output)\n\nif __name__ == \"__main__\":\n demo.launch()\n"]], "preprocessing": "passes the button value as a str into the function", "postprocessing": "expects a str to be returned from a function, which is set as the label of the button", "parent": "gradio", "prev_obj": "BarPlot", "next_obj": "Chatbot"}, "chatbot": {"class": null, "name": "Chatbot", "description": "Displays a chatbot output showing both user submitted messages and responses. Supports a subset of Markdown including bold, italics, code, tables. Also supports audio/video/image files, which are displayed in the Chatbot, and other kinds of files which are displayed as links.
", "tags": {"preprocessing": "passes the messages in the Chatbot as a {List[List[str | None | Tuple]]}, i.e. a list of lists. The inner list has 2 elements: the user message and the response message. See `Postprocessing` for the format of these messages.", "postprocessing": "expects function to return a {List[List[str | None | Tuple]]}, i.e. a list of lists. The inner list should have 2 elements: the user message and the response message. The individual messages can be (1) strings in valid Markdown, (2) tuples if sending files: (a filepath or URL to a file, [optional string alt text]) -- if the file is image/video/audio, it is displayed in the Chatbot, or (3) None, in which case the message is not displayed.", "demos": "chatbot_simple, chatbot_multimodal", "guides": "creating-a-chatbot"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "value", "annotation": "list[list[str | tuple[str] | tuple[str | Path, str] | None]] | Callable | None", "doc": "Default value to show in chatbot. If callable, the function will be called whenever the app loads to set the initial value of the component.", "default": "None"}, {"name": "color_map", "annotation": "dict[str, str] | None", "doc": "This parameter is deprecated.", "default": "None"}, {"name": "label", "annotation": "str | None", "doc": "component name in interface.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", "default": "None"}, {"name": "show_label", "annotation": "bool | None", "doc": "if True, will display label.", "default": "None"}, {"name": "container", "annotation": "bool", "doc": "If True, will place the component in a container - providing some extra padding around the border.", "default": "True"}, {"name": "scale", "annotation": "int | None", "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", "default": "None"}, {"name": "min_width", "annotation": "int", "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", "default": "160"}, {"name": "visible", "annotation": "bool", "doc": "If False, component will be hidden.", "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "elem_classes", "annotation": "list[str] | str | None", "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "height", "annotation": "int | None", "doc": "height of the component in pixels.", "default": "None"}, {"name": "latex_delimiters", "annotation": "list[dict[str, str | bool]] | None", "doc": "A list of dicts of the form {\"left\": open delimiter (str), \"right\": close delimiter (str), \"display\": whether to display in newline (bool)} that will be used to render LaTeX expressions. 
If not provided, `latex_delimiters` is set to `[{ \"left\": \"$$\", \"right\": \"$$\", \"display\": True }]`, so only expressions enclosed in $$ delimiters will be rendered as LaTeX, and in a new line. Pass in an empty list to disable LaTeX rendering. For more information, see the [KaTeX documentation](https://katex.org/docs/autorender.html).", "default": "None"}, {"name": "rtl", "annotation": "bool", "doc": "If True, sets the direction of the rendered text to right-to-left. Default is False, which renders text left-to-right.", "default": "False"}, {"name": "show_share_button", "annotation": "bool | None", "doc": "If True, will show a share icon in the corner of the component that allows user to share outputs to Hugging Face Spaces Discussions. If False, icon does not appear. If set to None (default behavior), then the icon appears if this Gradio app is launched on Spaces, but not otherwise.", "default": "None"}], "returns": {"annotation": null}, "example": null, "fns": [{"fn": null, "name": "change", "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. 
The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Chatbot"}, {"fn": null, "name": "select", "description": "Event listener for when the user selects message from Chatbot. Uses event data gradio.SelectData to carry `value` referring to text of selected message, and `index` tuple to refer to [message, participant] index. See EventData documentation on how to use this event data.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Chatbot"}], "string_shortcuts": [["Chatbot", "chatbot", "Uses default values"]], "demos": [["chatbot_simple", "import gradio as gr\nimport random\nimport time\n\nwith gr.Blocks() as demo:\n chatbot = gr.Chatbot()\n msg = gr.Textbox()\n clear = gr.ClearButton([msg, chatbot])\n\n def respond(message, chat_history):\n bot_message = random.choice([\"How are you?\", \"I love you\", \"I'm very hungry\"])\n chat_history.append((message, bot_message))\n time.sleep(2)\n return \"\", chat_history\n\n msg.submit(respond, [msg, chatbot], [msg, chatbot])\n\nif __name__ == \"__main__\":\n demo.launch()\n"], ["chatbot_multimodal", "import gradio as gr\nimport random\nimport time\n\n# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). 
Plus shows support for streaming text.\n\ndef add_text(history, text):\n history = history + [(text, None)]\n return history, gr.update(value=\"\", interactive=False)\n\n\ndef add_file(history, file):\n history = history + [((file.name,), None)]\n return history\n\n\ndef bot(history):\n response = \"**That's cool!**\"\n history[-1][1] = \"\"\n for character in response:\n history[-1][1] += character\n time.sleep(0.05)\n yield history\n\n\nwith gr.Blocks() as demo:\n chatbot = gr.Chatbot([], elem_id=\"chatbot\").style(height=750)\n\n with gr.Row():\n with gr.Column(scale=0.85):\n txt = gr.Textbox(\n show_label=False,\n placeholder=\"Enter text and press enter, or upload an image\",\n ).style(container=False)\n with gr.Column(scale=0.15, min_width=0):\n btn = gr.UploadButton(\"\ud83d\udcc1\", file_types=[\"image\", \"video\", \"audio\"])\n\n txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(\n bot, chatbot, chatbot\n )\n txt_msg.then(lambda: gr.update(interactive=True), None, [txt], queue=False)\n file_msg = btn.upload(add_file, [chatbot, btn], [chatbot], queue=False).then(\n bot, chatbot, chatbot\n )\n\ndemo.queue()\nif __name__ == \"__main__\":\n demo.launch()\n"]], "guides": [], "preprocessing": "passes the messages in the Chatbot as a List[List[str | None | Tuple]], i.e. a list of lists. The inner list has 2 elements: the user message and the response message. See `Postprocessing` for the format of these messages.", "postprocessing": "expects function to return a List[List[str | None | Tuple]], i.e. a list of lists. The inner list should have 2 elements: the user message and the response message. The individual messages can be (1) strings in valid Markdown, (2) tuples if sending files: (a filepath or URL to a file, [optional string alt text]) -- if the file is image/video/audio, it is displayed in the Chatbot, or (3) None, in which case the message is not displayed.", "parent": "gradio", "prev_obj": "Button", "next_obj": "Checkbox"}, "checkbox": {"class": null, "name": "Checkbox", "description": "Creates a checkbox that can be set to `True` or `False`.
", "tags": {"preprocessing": "passes the status of the checkbox as a {bool} into the function.", "postprocessing": "expects a {bool} returned from the function and, if it is True, checks the checkbox.", "examples-format": "a {bool} representing whether the box is checked.", "demos": "sentence_builder, titanic_survival"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "value", "annotation": "bool | Callable", "doc": "if True, checked by default. If callable, the function will be called whenever the app loads to set the initial value of the component.", "default": "False"}, {"name": "label", "annotation": "str | None", "doc": "component name in interface.", "default": "None"}, {"name": "info", "annotation": "str | None", "doc": "additional component description.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", "default": "None"}, {"name": "show_label", "annotation": "bool | None", "doc": "if True, will display label.", "default": "None"}, {"name": "container", "annotation": "bool", "doc": "If True, will place the component in a container - providing some extra padding around the border.", "default": "True"}, {"name": "scale", "annotation": "int | None", "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", "default": "None"}, {"name": "min_width", "annotation": "int", "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", "default": "160"}, {"name": "interactive", "annotation": "bool | None", "doc": "if True, this checkbox can be checked; if False, checking will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.", "default": "None"}, {"name": "visible", "annotation": "bool", "doc": "If False, component will be hidden.", "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "elem_classes", "annotation": "list[str] | str | None", "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}], "returns": {"annotation": null}, "example": null, "fns": [{"fn": null, "name": "change", "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. 
Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Checkbox"}, {"fn": null, "name": "input", "description": "This listener is triggered when the user changes the value of the component. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. 
For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Checkbox"}, {"fn": null, "name": "select", "description": "Event listener for when the user selects or deselects Checkbox. Uses event data gradio.SelectData to carry `value` referring to label of checkbox, and `selected` to refer to state of checkbox. See EventData documentation on how to use this event data.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Checkbox"}], "string_shortcuts": [["Checkbox", "checkbox", "Uses default values"]], "demos": [["sentence_builder", "import gradio as gr\n\n\ndef sentence_builder(quantity, animal, countries, place, activity_list, morning):\n return f\"\"\"The {quantity} {animal}s from {\" and \".join(countries)} went to the {place} where they {\" and \".join(activity_list)} until the {\"morning\" if morning else \"night\"}\"\"\"\n\n\ndemo = gr.Interface(\n sentence_builder,\n [\n gr.Slider(2, 20, value=4, label=\"Count\", info=\"Choose between 2 and 20\"),\n gr.Dropdown(\n [\"cat\", \"dog\", \"bird\"], label=\"Animal\", info=\"Will add more animals later!\"\n ),\n gr.CheckboxGroup([\"USA\", \"Japan\", \"Pakistan\"], label=\"Countries\", info=\"Where are they from?\"),\n gr.Radio([\"park\", \"zoo\", \"road\"], label=\"Location\", info=\"Where did they go?\"),\n gr.Dropdown(\n [\"ran\", \"swam\", \"ate\", \"slept\"], value=[\"swam\", \"slept\"], multiselect=True, label=\"Activity\", info=\"Lorem ipsum dolor sit amet, consectetur adipiscing elit. 
Sed auctor, nisl eget ultricies aliquam, nunc nisl aliquet nunc, eget aliquam nisl nunc vel nisl.\"\n ),\n gr.Checkbox(label=\"Morning\", info=\"Did they do it in the morning?\"),\n ],\n \"text\",\n examples=[\n [2, \"cat\", [\"Japan\", \"Pakistan\"], \"park\", [\"ate\", \"swam\"], True],\n [4, \"dog\", [\"Japan\"], \"zoo\", [\"ate\", \"swam\"], False],\n [10, \"bird\", [\"USA\", \"Pakistan\"], \"road\", [\"ran\"], False],\n [8, \"cat\", [\"Pakistan\"], \"zoo\", [\"ate\"], True],\n ]\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n"], ["titanic_survival", "import os\n\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\n\nimport gradio as gr\n\ncurrent_dir = os.path.dirname(os.path.realpath(__file__))\ndata = pd.read_csv(os.path.join(current_dir, \"files/titanic.csv\"))\n\n\ndef encode_age(df):\n df.Age = df.Age.fillna(-0.5)\n bins = (-1, 0, 5, 12, 18, 25, 35, 60, 120)\n categories = pd.cut(df.Age, bins, labels=False)\n df.Age = categories\n return df\n\n\ndef encode_fare(df):\n df.Fare = df.Fare.fillna(-0.5)\n bins = (-1, 0, 8, 15, 31, 1000)\n categories = pd.cut(df.Fare, bins, labels=False)\n df.Fare = categories\n return df\n\n\ndef encode_df(df):\n df = encode_age(df)\n df = encode_fare(df)\n sex_mapping = {\"male\": 0, \"female\": 1}\n df = df.replace({\"Sex\": sex_mapping})\n embark_mapping = {\"S\": 1, \"C\": 2, \"Q\": 3}\n df = df.replace({\"Embarked\": embark_mapping})\n df.Embarked = df.Embarked.fillna(0)\n df[\"Company\"] = 0\n df.loc[(df[\"SibSp\"] > 0), \"Company\"] = 1\n df.loc[(df[\"Parch\"] > 0), \"Company\"] = 2\n df.loc[(df[\"SibSp\"] > 0) & (df[\"Parch\"] > 0), \"Company\"] = 3\n df = df[\n [\n \"PassengerId\",\n \"Pclass\",\n \"Sex\",\n \"Age\",\n \"Fare\",\n \"Embarked\",\n \"Company\",\n \"Survived\",\n ]\n ]\n return df\n\n\ntrain = encode_df(data)\n\nX_all = train.drop([\"Survived\", \"PassengerId\"], axis=1)\ny_all = train[\"Survived\"]\n\nnum_test = 0.20\nX_train, X_test, y_train, y_test = train_test_split(\n X_all, y_all, test_size=num_test, random_state=23\n)\n\nclf = RandomForestClassifier()\nclf.fit(X_train, y_train)\npredictions = clf.predict(X_test)\n\n\ndef predict_survival(passenger_class, is_male, age, company, fare, embark_point):\n if passenger_class is None or embark_point is None:\n return None\n df = pd.DataFrame.from_dict(\n {\n \"Pclass\": [passenger_class + 1],\n \"Sex\": [0 if is_male else 1],\n \"Age\": [age],\n \"Fare\": [fare],\n \"Embarked\": [embark_point + 1],\n \"Company\": [\n (1 if \"Sibling\" in company else 0) + (2 if \"Child\" in company else 0)\n ]\n }\n )\n df = encode_age(df)\n df = encode_fare(df)\n pred = clf.predict_proba(df)[0]\n return {\"Perishes\": float(pred[0]), \"Survives\": float(pred[1])}\n\n\ndemo = gr.Interface(\n predict_survival,\n [\n gr.Dropdown([\"first\", \"second\", \"third\"], type=\"index\"),\n \"checkbox\",\n gr.Slider(0, 80, value=25),\n gr.CheckboxGroup([\"Sibling\", \"Child\"], label=\"Travelling with (select all)\"),\n gr.Number(value=20),\n gr.Radio([\"S\", \"C\", \"Q\"], type=\"index\"),\n ],\n \"label\",\n examples=[\n [\"first\", True, 30, [], 50, \"S\"],\n [\"second\", False, 40, [\"Sibling\", \"Child\"], 10, \"Q\"],\n [\"third\", True, 30, [\"Child\"], 20, \"S\"],\n ],\n interpretation=\"default\",\n live=True,\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n"]], "preprocessing": "passes the status of the checkbox as a bool into the function.", "postprocessing": "expects a bool returned from the function and, if it 
is True, checks the checkbox.", "examples-format": "a bool representing whether the box is checked.", "parent": "gradio", "prev_obj": "Chatbot", "next_obj": "CheckboxGroup"}, "checkboxgroup": {"class": null, "name": "CheckboxGroup", "description": "Creates a set of checkboxes of which a subset can be checked.", "tags": {"preprocessing": "passes the list of checked checkboxes as a {List[str]} or their indices as a {List[int]} into the function, depending on `type`.", "postprocessing": "expects a {List[str]}, each element of which becomes a checked checkbox.", "examples-format": "a {List[str]} representing the values to be checked.", "demos": "sentence_builder, titanic_survival"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "choices", "annotation": "list[str] | None", "doc": "list of options to select from.", "default": "None"}, {"name": "value", "annotation": "list[str] | str | Callable | None", "doc": "default selected list of options. If callable, the function will be called whenever the app loads to set the initial value of the component.", "default": "None"}, {"name": "type", "annotation": "Literal['value', 'index']", "doc": "Type of value to be returned by component. \"value\" returns the list of strings of the choices selected, \"index\" returns the list of indices of the choices selected.", "default": "\"value\""}, {"name": "label", "annotation": "str | None", "doc": "component name in interface.", "default": "None"}, {"name": "info", "annotation": "str | None", "doc": "additional component description.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", "default": "None"}, {"name": "show_label", "annotation": "bool | None", "doc": "if True, will display label.", "default": "None"}, {"name": "container", "annotation": "bool", "doc": "If True, will place the component in a container - providing some extra padding around the border.", "default": "True"}, {"name": "scale", "annotation": "int | None", "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", "default": "None"}, {"name": "min_width", "annotation": "int", "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", "default": "160"}, {"name": "interactive", "annotation": "bool | None", "doc": "if True, choices in this checkbox group will be checkable; if False, checking will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.", "default": "None"}, {"name": "visible", "annotation": "bool", "doc": "If False, component will be hidden.", "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "elem_classes", "annotation": "list[str] | str | None", "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. 
Can be used for targeting CSS styles.", "default": "None"}], "returns": {"annotation": null}, "example": null, "fns": [{"fn": null, "name": "change", "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.CheckboxGroup"}, {"fn": null, "name": "input", "description": "This listener is triggered when the user changes the value of the component. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.CheckboxGroup"}, {"fn": null, "name": "select", "description": "Event listener for when the user selects or deselects within CheckboxGroup. Uses event data gradio.SelectData to carry `value` referring to label of selected checkbox, `index` to refer to index, and `selected` to refer to state of checkbox. See EventData documentation on how to use this event data.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.CheckboxGroup"}], "string_shortcuts": [["CheckboxGroup", "checkboxgroup", "Uses default values"]], "demos": [["sentence_builder", "import gradio as gr\n\n\ndef sentence_builder(quantity, animal, countries, place, activity_list, morning):\n return f\"\"\"The {quantity} {animal}s from {\" and \".join(countries)} went to the {place} where they {\" and \".join(activity_list)} until the {\"morning\" if morning else \"night\"}\"\"\"\n\n\ndemo = gr.Interface(\n sentence_builder,\n [\n gr.Slider(2, 20, value=4, label=\"Count\", info=\"Choose between 2 and 20\"),\n gr.Dropdown(\n [\"cat\", \"dog\", \"bird\"], label=\"Animal\", info=\"Will add more animals later!\"\n ),\n gr.CheckboxGroup([\"USA\", \"Japan\", \"Pakistan\"], label=\"Countries\", info=\"Where are they from?\"),\n gr.Radio([\"park\", \"zoo\", \"road\"], label=\"Location\", info=\"Where did they go?\"),\n gr.Dropdown(\n [\"ran\", \"swam\", \"ate\", \"slept\"], value=[\"swam\", \"slept\"], multiselect=True, label=\"Activity\", info=\"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed auctor, nisl eget ultricies aliquam, nunc nisl aliquet nunc, eget aliquam nisl nunc vel nisl.\"\n ),\n gr.Checkbox(label=\"Morning\", info=\"Did they do it in the morning?\"),\n ],\n \"text\",\n examples=[\n [2, \"cat\", [\"Japan\", \"Pakistan\"], \"park\", [\"ate\", \"swam\"], True],\n [4, \"dog\", [\"Japan\"], \"zoo\", [\"ate\", \"swam\"], False],\n [10, \"bird\", [\"USA\", \"Pakistan\"], \"road\", [\"ran\"], False],\n [8, \"cat\", [\"Pakistan\"], \"zoo\", [\"ate\"], True],\n ]\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n"], ["titanic_survival", "import os\n\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\n\nimport gradio as gr\n\ncurrent_dir = os.path.dirname(os.path.realpath(__file__))\ndata = pd.read_csv(os.path.join(current_dir, \"files/titanic.csv\"))\n\n\ndef encode_age(df):\n df.Age = df.Age.fillna(-0.5)\n bins = (-1, 0, 5, 12, 18, 25, 35, 60, 120)\n categories = pd.cut(df.Age, bins, labels=False)\n df.Age = categories\n return df\n\n\ndef encode_fare(df):\n df.Fare = df.Fare.fillna(-0.5)\n bins = (-1, 0, 8, 15, 31, 1000)\n categories = pd.cut(df.Fare, bins, labels=False)\n df.Fare = categories\n return df\n\n\ndef encode_df(df):\n df = encode_age(df)\n df = encode_fare(df)\n sex_mapping = {\"male\": 0, \"female\": 1}\n df = df.replace({\"Sex\": sex_mapping})\n embark_mapping = {\"S\": 1, \"C\": 2, \"Q\": 3}\n df = df.replace({\"Embarked\": embark_mapping})\n df.Embarked = df.Embarked.fillna(0)\n df[\"Company\"] = 0\n df.loc[(df[\"SibSp\"] > 0), \"Company\"] = 1\n df.loc[(df[\"Parch\"] > 0), \"Company\"] = 2\n df.loc[(df[\"SibSp\"] > 0) & (df[\"Parch\"] > 0), \"Company\"] = 3\n df = df[\n [\n \"PassengerId\",\n \"Pclass\",\n \"Sex\",\n \"Age\",\n \"Fare\",\n \"Embarked\",\n \"Company\",\n \"Survived\",\n ]\n ]\n return df\n\n\ntrain = encode_df(data)\n\nX_all = train.drop([\"Survived\", \"PassengerId\"], axis=1)\ny_all = train[\"Survived\"]\n\nnum_test = 0.20\nX_train, X_test, y_train, y_test = train_test_split(\n X_all, y_all, test_size=num_test, random_state=23\n)\n\nclf = RandomForestClassifier()\nclf.fit(X_train, y_train)\npredictions = clf.predict(X_test)\n\n\ndef predict_survival(passenger_class, is_male, age, company, fare, embark_point):\n if passenger_class is None or embark_point is None:\n return None\n df = 
pd.DataFrame.from_dict(\n {\n \"Pclass\": [passenger_class + 1],\n \"Sex\": [0 if is_male else 1],\n \"Age\": [age],\n \"Fare\": [fare],\n \"Embarked\": [embark_point + 1],\n \"Company\": [\n (1 if \"Sibling\" in company else 0) + (2 if \"Child\" in company else 0)\n ]\n }\n )\n df = encode_age(df)\n df = encode_fare(df)\n pred = clf.predict_proba(df)[0]\n return {\"Perishes\": float(pred[0]), \"Survives\": float(pred[1])}\n\n\ndemo = gr.Interface(\n predict_survival,\n [\n gr.Dropdown([\"first\", \"second\", \"third\"], type=\"index\"),\n \"checkbox\",\n gr.Slider(0, 80, value=25),\n gr.CheckboxGroup([\"Sibling\", \"Child\"], label=\"Travelling with (select all)\"),\n gr.Number(value=20),\n gr.Radio([\"S\", \"C\", \"Q\"], type=\"index\"),\n ],\n \"label\",\n examples=[\n [\"first\", True, 30, [], 50, \"S\"],\n [\"second\", False, 40, [\"Sibling\", \"Child\"], 10, \"Q\"],\n [\"third\", True, 30, [\"Child\"], 20, \"S\"],\n ],\n interpretation=\"default\",\n live=True,\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n"]], "preprocessing": "passes the list of checked checkboxes as a List[str] or their indices as a List[int] into the function, depending on `type`.", "postprocessing": "expects a List[str], each element of which becomes a checked checkbox.", "examples-format": "a List[str] representing the values to be checked.", "parent": "gradio", "prev_obj": "Checkbox", "next_obj": "ClearButton"}, "clearbutton": {"class": null, "name": "ClearButton", "description": "Button that clears the value of a component or a list of components when clicked. It is instantiated with the list of components to clear.", "tags": {"preprocessing": "passes the button value as a {str} into the function", "postprocessing": "expects a {str} to be returned from a function, which is set as the label of the button"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "components", "annotation": "None | list[Component] | Component", "doc": null, "default": "None"}, {"name": "value", "annotation": "str", "doc": "Default text for the button to display. If callable, the function will be called whenever the app loads to set the initial value of the component.", "default": "\"Clear\""}, {"name": "variant", "annotation": "Literal['primary', 'secondary', 'stop']", "doc": "'primary' for main call-to-action, 'secondary' for a more subdued style, 'stop' for a stop button.", "default": "\"secondary\""}, {"name": "size", "annotation": "Literal['sm', 'lg'] | None", "doc": "Size of the button. Can be \"sm\" or \"lg\".", "default": "None"}, {"name": "icon", "annotation": "str | None", "doc": "URL or path to the icon file to display within the button. If None, no icon will be displayed.", "default": "None"}, {"name": "link", "annotation": "str | None", "doc": "URL to open when the button is clicked. If None, no link will be used.", "default": "None"}, {"name": "visible", "annotation": "bool", "doc": "If False, component will be hidden.", "default": "True"}, {"name": "interactive", "annotation": "bool", "doc": "If False, the Button will be in a disabled state.", "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "elem_classes", "annotation": "list[str] | str | None", "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. 
Can be used for targeting CSS styles.", "default": "None"}, {"name": "scale", "annotation": "int | None", "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", "default": "None"}, {"name": "min_width", "annotation": "int | None", "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", "default": "None"}], "returns": {"annotation": null}, "example": null, "fns": [{"fn": null, "name": "add", "description": "Adds a component or list of components to the list of components that will be cleared when the button is clicked.", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "components", "annotation": "None | Component | list[Component]", "doc": null}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.ClearButton"}, {"fn": null, "name": "click", "description": "This listener is triggered when the component (e.g. a button) is clicked. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.ClearButton"}], "string_shortcuts": [["ClearButton", "clearbutton", "Uses default values"]], "preprocessing": "passes the button value as a str into the function", "postprocessing": "expects a str to be returned from a function, which is set as the label of the button", "parent": "gradio", "prev_obj": "CheckboxGroup", "next_obj": "Code"}, "code": {"class": null, "name": "Code", "description": "Creates a Code editor for entering, editing or viewing code.", "tags": {"preprocessing": "passes a {str} of code into the function.", "postprocessing": "expects the function to return a {str} of code or a single-element {tuple}: (string filepath,)"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "value", "annotation": "str | tuple[str] | None", "doc": "Default value to show in the code editor. If callable, the function will be called whenever the app loads to set the initial value of the component.", "default": "None"}, {"name": "language", "annotation": "Literal['python', 'markdown', 'json', 'html', 'css', 'javascript', 'typescript', 'yaml', 'dockerfile', 'shell', 'r'] | None", "doc": "The language to display the code as. Supported languages listed in `gr.Code.languages`.", "default": "None"}, {"name": "lines", "annotation": "int", "doc": null, "default": "5"}, {"name": "label", "annotation": "str | None", "doc": "component name in interface.", "default": "None"}, {"name": "interactive", "annotation": "bool | None", "doc": "Whether user should be able to enter code or only view it.", "default": "None"}, {"name": "show_label", "annotation": "bool | None", "doc": "if True, will display label.", "default": "None"}, {"name": "container", "annotation": "bool", "doc": "If True, will place the component in a container - providing some extra padding around the border.", "default": "True"}, {"name": "scale", "annotation": "int | None", "doc": "relative width compared to adjacent Components in a Row. 
For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", "default": "None"}, {"name": "min_width", "annotation": "int", "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", "default": "160"}, {"name": "visible", "annotation": "bool", "doc": "If False, component will be hidden.", "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "elem_classes", "annotation": "list[str] | str | None", "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}], "returns": {"annotation": null}, "example": null, "fns": [{"fn": null, "name": "languages", "description": "['python', 'markdown', 'json', 'html', 'css', 'javascript', 'typescript', 'yaml', 'dockerfile', 'shell', 'r', None]", "tags": {}, "parameters": {}, "returns": {}, "example": "", "override_signature": "gr.Code.languages", "parent": "gradio.Code"}, {"fn": null, "name": "change", "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. 
If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Code"}, {"fn": null, "name": "input", "description": "This listener is triggered when the user changes the value of the component. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Code"}], "string_shortcuts": [["Code", "code", "Uses default values"]], "preprocessing": "passes a str of code into the function.", "postprocessing": "expects the function to return a str of code or a single-element tuple: (string filepath,)", "parent": "gradio", "prev_obj": "ClearButton", "next_obj": "ColorPicker"}, "colorpicker": {"class": null, "name": "ColorPicker", "description": "Creates a color picker for user to select a color as string input.", "tags": {"preprocessing": "passes selected color value as a {str} into the function.", "postprocessing": "expects a {str} returned from function and sets color picker value to it.", "examples-format": "a {str} with a hexadecimal representation of a color, e.g. \"#ff0000\" for red.", "demos": "color_picker, color_generator"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "value", "annotation": "str | Callable | None", "doc": "default text to provide in color picker. 
If callable, the function will be called whenever the app loads to set the initial value of the component.", "default": "None"}, {"name": "label", "annotation": "str | None", "doc": "component name in interface.", "default": "None"}, {"name": "info", "annotation": "str | None", "doc": "additional component description.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", "default": "None"}, {"name": "show_label", "annotation": "bool | None", "doc": "if True, will display label.", "default": "None"}, {"name": "container", "annotation": "bool", "doc": "If True, will place the component in a container - providing some extra padding around the border.", "default": "True"}, {"name": "scale", "annotation": "int | None", "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", "default": "None"}, {"name": "min_width", "annotation": "int", "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", "default": "160"}, {"name": "interactive", "annotation": "bool | None", "doc": "if True, will be rendered as an editable color picker; if False, editing will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.", "default": "None"}, {"name": "visible", "annotation": "bool", "doc": "If False, component will be hidden.", "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "elem_classes", "annotation": "list[str] | str | None", "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}], "returns": {"annotation": null}, "example": null, "fns": [{"fn": null, "name": "change", "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. 
If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.ColorPicker"}, {"fn": null, "name": "input", "description": "This listener is triggered when the user changes the value of the component. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. 
Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.ColorPicker"}, {"fn": null, "name": "submit", "description": "This listener is triggered when the user presses the Enter key while the component (e.g. a textbox) is focused. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. 
For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.ColorPicker"}, {"fn": null, "name": "focus", "description": "This listener is triggered when the component is focused (e.g. when the user clicks inside a textbox). This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.ColorPicker"}, {"fn": null, "name": "blur", "description": "This listener is triggered when the component is unfocused/blurred (e.g. when the user clicks outside of a textbox). This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.ColorPicker"}], "string_shortcuts": [["ColorPicker", "colorpicker", "Uses default values"]], "demos": [["color_picker", "import gradio as gr\nimport numpy as np\nimport os\nfrom PIL import Image, ImageColor\n\n\ndef change_color(icon, color):\n\n \"\"\"\n Function that given an icon in .png format changes its color\n Args:\n icon: Icon whose color needs to be changed.\n color: Chosen color with which to edit the input icon.\n Returns:\n edited_image: Edited icon.\n \"\"\"\n img = icon.convert(\"LA\")\n img = img.convert(\"RGBA\")\n image_np = np.array(icon)\n _, _, _, alpha = image_np.T\n mask = alpha > 0\n image_np[..., :-1][mask.T] = ImageColor.getcolor(color, \"RGB\")\n edited_image = Image.fromarray(image_np)\n return edited_image\n\n\ninputs = [\n gr.Image(label=\"icon\", type=\"pil\", image_mode=\"RGBA\"),\n gr.ColorPicker(label=\"color\"),\n]\noutputs = gr.Image(label=\"colored icon\")\n\ndemo = gr.Interface(\n fn=change_color,\n inputs=inputs,\n outputs=outputs,\n examples=[\n [os.path.join(os.path.dirname(__file__), \"rabbit.png\"), \"#ff0000\"],\n [os.path.join(os.path.dirname(__file__), \"rabbit.png\"), \"#0000FF\"],\n ],\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n"], ["color_generator", "import gradio as gr\nimport cv2\nimport numpy as np\nimport random\n\n\n# Convert decimal color to hexadecimal color\ndef RGB_to_Hex(rgb):\n color = \"#\"\n for i in rgb:\n num = int(i)\n color += str(hex(num))[-2:].replace(\"x\", \"0\").upper()\n return color\n\n\n# Randomly generate light or dark colors\ndef random_color(is_light=True):\n return (\n random.randint(0, 127) + int(is_light) * 128,\n random.randint(0, 127) + int(is_light) * 128,\n random.randint(0, 127) + int(is_light) * 128,\n )\n\n\ndef switch_color(color_style):\n if color_style == \"light\":\n is_light = True\n elif color_style == \"dark\":\n is_light = False\n back_color_ = random_color(is_light) # Randomly generate colors\n back_color = RGB_to_Hex(back_color_) # Convert to hexadecimal\n\n # Draw color pictures.\n w, h = 50, 50\n img = np.zeros((h, w, 3), np.uint8)\n cv2.rectangle(img, (0, 0), (w, h), back_color_, thickness=-1)\n\n return back_color, back_color, img\n\n\ninputs = [gr.Radio([\"light\", \"dark\"], value=\"light\")]\n\noutputs = [\n gr.ColorPicker(label=\"color\"),\n gr.Textbox(label=\"hexadecimal color\"),\n gr.Image(type=\"numpy\", label=\"color picture\"),\n]\n\ntitle = \"Color Generator\"\ndescription = (\n \"Click the Submit button, and a dark or light color will be randomly generated.\"\n)\n\ndemo = gr.Interface(\n fn=switch_color,\n inputs=inputs,\n outputs=outputs,\n title=title,\n description=description,\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n"]], "preprocessing": "passes selected color value as a str into the function.", "postprocessing": "expects a str returned from function and sets color picker value to it.", "examples-format": "a str with a hexadecimal representation of a color, e.g. 
\"#ff0000\" for red.", "parent": "gradio", "prev_obj": "Code", "next_obj": "Dataframe"}, "dataframe": {"class": null, "name": "Dataframe", "description": "Accepts or displays 2D input through a spreadsheet-like component for dataframes.", "tags": {"preprocessing": "passes the uploaded spreadsheet data as a {pandas.DataFrame}, {numpy.array}, {List[List]}, or {List} depending on `type`", "postprocessing": "expects a {pandas.DataFrame}, {numpy.array}, {List[List]}, {List}, a {Dict} with keys `data` (and optionally `headers`), or {str} path to a csv, which is rendered in the spreadsheet.", "examples-format": "a {str} filepath to a csv with data, a pandas dataframe, or a list of lists (excluding headers) where each sublist is a row of data.", "demos": "filter_records, matrix_transpose, tax_calculator"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "value", "annotation": "list[list[Any]] | Callable | None", "doc": "Default value as a 2-dimensional list of values. If callable, the function will be called whenever the app loads to set the initial value of the component.", "default": "None"}, {"name": "headers", "annotation": "list[str] | None", "doc": "List of str header names. If None, no headers are shown.", "default": "None"}, {"name": "row_count", "annotation": "int | tuple[int, str]", "doc": "Limit number of rows for input and decide whether user can create new rows. The first element of the tuple is an `int`, the row count; the second should be 'fixed' or 'dynamic', the new row behaviour. If an `int` is passed the rows default to 'dynamic'", "default": "(1, 'dynamic')"}, {"name": "col_count", "annotation": "int | tuple[int, str] | None", "doc": "Limit number of columns for input and decide whether user can create new columns. The first element of the tuple is an `int`, the number of columns; the second should be 'fixed' or 'dynamic', the new column behaviour. If an `int` is passed the columns default to 'dynamic'", "default": "None"}, {"name": "datatype", "annotation": "str | list[str]", "doc": "Datatype of values in sheet. Can be provided per column as a list of strings, or for the entire sheet as a single string. Valid datatypes are \"str\", \"number\", \"bool\", \"date\", and \"markdown\".", "default": "\"str\""}, {"name": "type", "annotation": "Literal['pandas', 'numpy', 'array']", "doc": "Type of value to be returned by component. \"pandas\" for pandas dataframe, \"numpy\" for numpy array, or \"array\" for a Python array.", "default": "\"pandas\""}, {"name": "max_rows", "annotation": "int | None", "doc": "Maximum number of rows to display at once. Set to None for infinite.", "default": "20"}, {"name": "max_cols", "annotation": "int | None", "doc": "Maximum number of columns to display at once. Set to None for infinite.", "default": "None"}, {"name": "overflow_row_behaviour", "annotation": "Literal['paginate', 'show_ends']", "doc": "If set to \"paginate\", will create pages for overflow rows. If set to \"show_ends\", will show initial and final rows and truncate middle rows.", "default": "\"paginate\""}, {"name": "label", "annotation": "str | None", "doc": "component name in interface.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. 
to cancel it) via this component's .load_event attribute.", "default": "None"}, {"name": "show_label", "annotation": "bool | None", "doc": "if True, will display label.", "default": "None"}, {"name": "scale", "annotation": "int | None", "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", "default": "None"}, {"name": "min_width", "annotation": "int", "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", "default": "160"}, {"name": "interactive", "annotation": "bool | None", "doc": "if True, will allow users to edit the dataframe; if False, can only be used to display data. If not provided, this is inferred based on whether the component is used as an input or output.", "default": "None"}, {"name": "visible", "annotation": "bool", "doc": "If False, component will be hidden.", "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "elem_classes", "annotation": "list[str] | str | None", "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "wrap", "annotation": "bool", "doc": "if True text in table cells will wrap when appropriate, if False the table will scroll horizontally. Defaults to False.", "default": "False"}], "returns": {"annotation": null}, "example": null, "fns": [{"fn": null, "name": "change", "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Dataframe"}, {"fn": null, "name": "input", "description": "This listener is triggered when the user changes the value of the component. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. 
If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Dataframe"}, {"fn": null, "name": "select", "description": "Event listener for when the user selects cell within Dataframe. Uses event data gradio.SelectData to carry `value` referring to value of selected cell, and `index` tuple to refer to index row and column. See EventData documentation on how to use this event data.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. 
Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Dataframe"}], "string_shortcuts": [["Dataframe", "dataframe", "Uses default values"], ["Numpy", "numpy", "Uses type=\"numpy\""], ["Matrix", "matrix", "Uses type=\"array\""], ["List", "list", "Uses type=\"array\", col_count=1"]], "demos": [["filter_records", "import gradio as gr\n\n\ndef filter_records(records, gender):\n return records[records[\"gender\"] == gender]\n\n\ndemo = gr.Interface(\n filter_records,\n [\n gr.Dataframe(\n headers=[\"name\", \"age\", \"gender\"],\n datatype=[\"str\", \"number\", \"str\"],\n row_count=5,\n col_count=(3, \"fixed\"),\n ),\n gr.Dropdown([\"M\", \"F\", \"O\"]),\n ],\n \"dataframe\",\n description=\"Enter gender as 'M', 'F', or 'O' for other.\",\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n"], ["matrix_transpose", "import numpy as np\n\nimport gradio as gr\n\n\ndef transpose(matrix):\n return matrix.T\n\n\ndemo = gr.Interface(\n transpose,\n gr.Dataframe(type=\"numpy\", datatype=\"number\", row_count=5, col_count=3),\n \"numpy\",\n examples=[\n [np.zeros((3, 3)).tolist()],\n [np.ones((2, 2)).tolist()],\n [np.random.randint(0, 10, (3, 10)).tolist()],\n [np.random.randint(0, 10, (10, 3)).tolist()],\n [np.random.randint(0, 10, (10, 10)).tolist()],\n ],\n cache_examples=False\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n"], ["tax_calculator", "import gradio as gr\n\ndef tax_calculator(income, marital_status, assets):\n tax_brackets = [(10, 0), (25, 8), (60, 12), (120, 20), (250, 30)]\n total_deductible = sum(assets[\"Cost\"])\n taxable_income = income - total_deductible\n\n total_tax = 0\n for bracket, rate in tax_brackets:\n if taxable_income > bracket:\n total_tax += (taxable_income - bracket) * rate / 100\n\n if marital_status == \"Married\":\n total_tax *= 0.75\n elif marital_status == \"Divorced\":\n total_tax *= 0.8\n\n return round(total_tax)\n\ndemo = gr.Interface(\n tax_calculator,\n [\n \"number\",\n gr.Radio([\"Single\", \"Married\", \"Divorced\"]),\n gr.Dataframe(\n headers=[\"Item\", \"Cost\"],\n datatype=[\"str\", \"number\"],\n label=\"Assets Purchased this Year\",\n ),\n ],\n \"number\",\n examples=[\n [10000, \"Married\", [[\"Suit\", 5000], [\"Laptop\", 800], [\"Car\", 1800]]],\n [80000, \"Single\", [[\"Suit\", 800], [\"Watch\", 1800], [\"Car\", 800]]],\n ],\n)\n\ndemo.launch()\n"]], "preprocessing": "passes the uploaded spreadsheet data as a pandas.DataFrame, numpy.array, List[List], or List depending on `type`", "postprocessing": "expects a pandas.DataFrame, numpy.array, List[List], List, a Dict with keys `data` (and optionally `headers`), or str path to a csv, which is rendered in the spreadsheet.", "examples-format": "a str filepath to a csv with data, a pandas dataframe, or a list of lists (excluding headers) where each sublist is a row of data.", "parent": "gradio", "prev_obj": "ColorPicker", "next_obj": "Dataset"}, "dataset": {"class": null, "name": "Dataset", "description": "Used to create an output widget for showing datasets. 
Used to render the examples box.", "tags": {"preprocessing": "passes the selected sample either as a {list} of data (if type=\"value\") or as an {int} index (if type=\"index\")", "postprocessing": "expects a {list} of {lists} corresponding to the dataset data."}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "label", "annotation": "str | None", "doc": null, "default": "None"}, {"name": "components", "annotation": "list[IOComponent] | list[str]", "doc": "Which component types to show in this dataset widget, can be passed in as a list of string names or Components instances. The following components are supported in a Dataset: Audio, Checkbox, CheckboxGroup, ColorPicker, Dataframe, Dropdown, File, HTML, Image, Markdown, Model3D, Number, Radio, Slider, Textbox, TimeSeries, Video"}, {"name": "samples", "annotation": "list[list[Any]] | None", "doc": "a nested list of samples. Each sublist within the outer list represents a data sample, and each element within the sublist represents a value for each component", "default": "None"}, {"name": "headers", "annotation": "list[str] | None", "doc": "Column headers in the Dataset widget, should be the same len as components. If not provided, inferred from component labels", "default": "None"}, {"name": "type", "annotation": "Literal['values', 'index']", "doc": "'values' if clicking on a sample should pass the value of the sample, or \"index\" if it should pass the index of the sample", "default": "\"values\""}, {"name": "samples_per_page", "annotation": "int", "doc": "how many examples to show per page.", "default": "10"}, {"name": "visible", "annotation": "bool", "doc": "If False, component will be hidden.", "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "elem_classes", "annotation": "list[str] | str | None", "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "container", "annotation": "bool", "doc": "If True, will place the component in a container - providing some extra padding around the border.", "default": "True"}, {"name": "scale", "annotation": "int | None", "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", "default": "None"}, {"name": "min_width", "annotation": "int", "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", "default": "160"}], "returns": {"annotation": null}, "example": null, "fns": [{"fn": null, "name": "click", "description": "This listener is triggered when the component (e.g. a button) is clicked. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. 
Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Dataset"}, {"fn": null, "name": "select", "description": "This listener is triggered when the user selects from within the Component. This event has EventData of type gradio.SelectData that carries information, accessible through SelectData.index and SelectData.value. See EventData documentation on how to use this event data.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Dataset"}], "string_shortcuts": [["Dataset", "dataset", "Uses default values"]], "preprocessing": "passes the selected sample either as a list of data (if type=\"value\") or as an int index (if type=\"index\")", "postprocessing": "expects a list of lists corresponding to the dataset data.", "override_signature": "gr.Dataset(components, samples)", "parent": "gradio", "prev_obj": "Dataframe", "next_obj": "Dropdown"}, "dropdown": {"class": null, "name": "Dropdown", "description": "Creates a dropdown of choices from which entries can be selected.", "tags": {"preprocessing": "passes the value of the selected dropdown entry as a {str} or its index as an {int} into the function, depending on `type`.", "postprocessing": "expects a {str} corresponding to the value of the dropdown entry to be selected.", "examples-format": "a {str} representing the drop down value to select.", "demos": "sentence_builder, titanic_survival"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "choices", "annotation": "list[str] | None", "doc": "list of options to select from.", "default": "None"}, {"name": "value", "annotation": "str | list[str] | Callable | None", "doc": "default value(s) selected in dropdown. If None, no value is selected by default. If callable, the function will be called whenever the app loads to set the initial value of the component.", "default": "None"}, {"name": "type", "annotation": "Literal['value', 'index']", "doc": "Type of value to be returned by component. \"value\" returns the string of the choice selected, \"index\" returns the index of the choice selected.", "default": "\"value\""}, {"name": "multiselect", "annotation": "bool | None", "doc": "if True, multiple choices can be selected.", "default": "None"}, {"name": "allow_custom_value", "annotation": "bool", "doc": "If True, allows user to enter a custom value that is not in the list of choices. Only applies if `multiselect` is False.", "default": "False"}, {"name": "max_choices", "annotation": "int | None", "doc": "maximum number of choices that can be selected. If None, no limit is enforced.", "default": "None"}, {"name": "label", "annotation": "str | None", "doc": "component name in interface.", "default": "None"}, {"name": "info", "annotation": "str | None", "doc": "additional component description.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. 
Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", "default": "None"}, {"name": "show_label", "annotation": "bool | None", "doc": "if True, will display label.", "default": "None"}, {"name": "container", "annotation": "bool", "doc": "If True, will place the component in a container - providing some extra padding around the border.", "default": "True"}, {"name": "scale", "annotation": "int | None", "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", "default": "None"}, {"name": "min_width", "annotation": "int", "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", "default": "160"}, {"name": "interactive", "annotation": "bool | None", "doc": "if True, choices in this dropdown will be selectable; if False, selection will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.", "default": "None"}, {"name": "visible", "annotation": "bool", "doc": "If False, component will be hidden.", "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "elem_classes", "annotation": "list[str] | str | None", "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}], "returns": {"annotation": null}, "example": null, "fns": [{"fn": null, "name": "change", "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Dropdown"}, {"fn": null, "name": "input", "description": "This listener is triggered when the user changes the value of the component. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. 
If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Dropdown"}, {"fn": null, "name": "focus", "description": "This listener is triggered when the component is focused (e.g. when the user clicks inside a textbox). This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. 
Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Dropdown"}, {"fn": null, "name": "blur", "description": "This listener is triggered when the component is unfocused/blurred (e.g. when the user clicks outside of a textbox). This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. 
For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Dropdown"}, {"fn": null, "name": "select", "description": "Event listener for when the user selects Dropdown option. Uses event data gradio.SelectData to carry `value` referring to label of selected option, and `index` to refer to index. See EventData documentation on how to use this event data.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Dropdown"}], "string_shortcuts": [["Dropdown", "dropdown", "Uses default values"]], "demos": [["sentence_builder", "import gradio as gr\n\n\ndef sentence_builder(quantity, animal, countries, place, activity_list, morning):\n return f\"\"\"The {quantity} {animal}s from {\" and \".join(countries)} went to the {place} where they {\" and \".join(activity_list)} until the {\"morning\" if morning else \"night\"}\"\"\"\n\n\ndemo = gr.Interface(\n sentence_builder,\n [\n gr.Slider(2, 20, value=4, label=\"Count\", info=\"Choose between 2 and 20\"),\n gr.Dropdown(\n [\"cat\", \"dog\", \"bird\"], label=\"Animal\", info=\"Will add more animals later!\"\n ),\n gr.CheckboxGroup([\"USA\", \"Japan\", \"Pakistan\"], label=\"Countries\", info=\"Where are they from?\"),\n gr.Radio([\"park\", \"zoo\", \"road\"], label=\"Location\", info=\"Where did they go?\"),\n gr.Dropdown(\n [\"ran\", \"swam\", \"ate\", \"slept\"], value=[\"swam\", \"slept\"], multiselect=True, label=\"Activity\", info=\"Lorem ipsum dolor sit amet, consectetur adipiscing elit. 
Sed auctor, nisl eget ultricies aliquam, nunc nisl aliquet nunc, eget aliquam nisl nunc vel nisl.\"\n ),\n gr.Checkbox(label=\"Morning\", info=\"Did they do it in the morning?\"),\n ],\n \"text\",\n examples=[\n [2, \"cat\", [\"Japan\", \"Pakistan\"], \"park\", [\"ate\", \"swam\"], True],\n [4, \"dog\", [\"Japan\"], \"zoo\", [\"ate\", \"swam\"], False],\n [10, \"bird\", [\"USA\", \"Pakistan\"], \"road\", [\"ran\"], False],\n [8, \"cat\", [\"Pakistan\"], \"zoo\", [\"ate\"], True],\n ]\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n"], ["titanic_survival", "import os\n\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\n\nimport gradio as gr\n\ncurrent_dir = os.path.dirname(os.path.realpath(__file__))\ndata = pd.read_csv(os.path.join(current_dir, \"files/titanic.csv\"))\n\n\ndef encode_age(df):\n df.Age = df.Age.fillna(-0.5)\n bins = (-1, 0, 5, 12, 18, 25, 35, 60, 120)\n categories = pd.cut(df.Age, bins, labels=False)\n df.Age = categories\n return df\n\n\ndef encode_fare(df):\n df.Fare = df.Fare.fillna(-0.5)\n bins = (-1, 0, 8, 15, 31, 1000)\n categories = pd.cut(df.Fare, bins, labels=False)\n df.Fare = categories\n return df\n\n\ndef encode_df(df):\n df = encode_age(df)\n df = encode_fare(df)\n sex_mapping = {\"male\": 0, \"female\": 1}\n df = df.replace({\"Sex\": sex_mapping})\n embark_mapping = {\"S\": 1, \"C\": 2, \"Q\": 3}\n df = df.replace({\"Embarked\": embark_mapping})\n df.Embarked = df.Embarked.fillna(0)\n df[\"Company\"] = 0\n df.loc[(df[\"SibSp\"] > 0), \"Company\"] = 1\n df.loc[(df[\"Parch\"] > 0), \"Company\"] = 2\n df.loc[(df[\"SibSp\"] > 0) & (df[\"Parch\"] > 0), \"Company\"] = 3\n df = df[\n [\n \"PassengerId\",\n \"Pclass\",\n \"Sex\",\n \"Age\",\n \"Fare\",\n \"Embarked\",\n \"Company\",\n \"Survived\",\n ]\n ]\n return df\n\n\ntrain = encode_df(data)\n\nX_all = train.drop([\"Survived\", \"PassengerId\"], axis=1)\ny_all = train[\"Survived\"]\n\nnum_test = 0.20\nX_train, X_test, y_train, y_test = train_test_split(\n X_all, y_all, test_size=num_test, random_state=23\n)\n\nclf = RandomForestClassifier()\nclf.fit(X_train, y_train)\npredictions = clf.predict(X_test)\n\n\ndef predict_survival(passenger_class, is_male, age, company, fare, embark_point):\n if passenger_class is None or embark_point is None:\n return None\n df = pd.DataFrame.from_dict(\n {\n \"Pclass\": [passenger_class + 1],\n \"Sex\": [0 if is_male else 1],\n \"Age\": [age],\n \"Fare\": [fare],\n \"Embarked\": [embark_point + 1],\n \"Company\": [\n (1 if \"Sibling\" in company else 0) + (2 if \"Child\" in company else 0)\n ]\n }\n )\n df = encode_age(df)\n df = encode_fare(df)\n pred = clf.predict_proba(df)[0]\n return {\"Perishes\": float(pred[0]), \"Survives\": float(pred[1])}\n\n\ndemo = gr.Interface(\n predict_survival,\n [\n gr.Dropdown([\"first\", \"second\", \"third\"], type=\"index\"),\n \"checkbox\",\n gr.Slider(0, 80, value=25),\n gr.CheckboxGroup([\"Sibling\", \"Child\"], label=\"Travelling with (select all)\"),\n gr.Number(value=20),\n gr.Radio([\"S\", \"C\", \"Q\"], type=\"index\"),\n ],\n \"label\",\n examples=[\n [\"first\", True, 30, [], 50, \"S\"],\n [\"second\", False, 40, [\"Sibling\", \"Child\"], 10, \"Q\"],\n [\"third\", True, 30, [\"Child\"], 20, \"S\"],\n ],\n interpretation=\"default\",\n live=True,\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n"]], "preprocessing": "passes the value of the selected dropdown entry as a str or its index as an int into the function, depending on `type`.", 
"postprocessing": "expects a str corresponding to the value of the dropdown entry to be selected.", "examples-format": "a str representing the drop down value to select.", "parent": "gradio", "prev_obj": "Dataset", "next_obj": "DuplicateButton"}, "duplicatebutton": {"class": null, "name": "DuplicateButton", "description": "Button that triggers a Spaces Duplication, when the demo is on Hugging Face Spaces. Does nothing locally.", "tags": {"preprocessing": "passes the button value as a {str} into the function", "postprocessing": "expects a {str} to be returned from a function, which is set as the label of the button"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "value", "annotation": "str", "doc": "Default text for the button to display. If callable, the function will be called whenever the app loads to set the initial value of the component.", "default": "\"Duplicate Space\""}, {"name": "variant", "annotation": "Literal['primary', 'secondary', 'stop']", "doc": "'primary' for main call-to-action, 'secondary' for a more subdued style, 'stop' for a stop button.", "default": "\"secondary\""}, {"name": "size", "annotation": "Literal['sm', 'lg'] | None", "doc": "Size of the button. Can be \"sm\" or \"lg\".", "default": "\"sm\""}, {"name": "icon", "annotation": "str | None", "doc": "URL or path to the icon file to display within the button. If None, no icon will be displayed.", "default": "None"}, {"name": "link", "annotation": "str | None", "doc": "URL to open when the button is clicked. If None, no link will be used.", "default": "None"}, {"name": "visible", "annotation": "bool", "doc": "If False, component will be hidden.", "default": "True"}, {"name": "interactive", "annotation": "bool", "doc": "If False, the Button will be in a disabled state.", "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "elem_classes", "annotation": "list[str] | str | None", "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "scale", "annotation": "int | None", "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", "default": "0"}, {"name": "min_width", "annotation": "int | None", "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", "default": "None"}], "returns": {"annotation": null}, "example": null, "fns": [{"fn": null, "name": "click", "description": "This listener is triggered when the component (e.g. a button) is clicked. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. 
Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.DuplicateButton"}], "string_shortcuts": [["DuplicateButton", "duplicatebutton", "Uses default values"]], "preprocessing": "passes the button value as a str into the function", "postprocessing": "expects a str to be returned from a function, which is set as the label of the button", "parent": "gradio", "prev_obj": "Dropdown", "next_obj": "File"}, "file": {"class": null, "name": "File", "description": "Creates a file component that allows uploading a generic file (when used as an input) and/or displaying generic files (as output).", "tags": {"preprocessing": "passes the uploaded file as a {tempfile._TemporaryFileWrapper} or {List[tempfile._TemporaryFileWrapper]} depending on `file_count` (or a {bytes}/{List{bytes}} depending on `type`)", "postprocessing": "expects function to return a {str} path to a file, or {List[str]} consisting of paths to files.", "examples-format": "a {str} path to a local file that populates the component.", "demos": "zip_to_json, zip_files"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "value", "annotation": "str | list[str] | Callable | None", "doc": "Default file to display, given as str file path. If callable, the function will be called whenever the app loads to set the initial value of the component.", "default": "None"}, {"name": "file_count", "annotation": "Literal['single', 'multiple', 'directory']", "doc": "If \"single\", allows user to upload one file. If \"multiple\", user uploads multiple files. If \"directory\", user uploads all files in selected directory. Return type will be list for each file in case of \"multiple\" or \"directory\".", "default": "\"single\""}, {"name": "file_types", "annotation": "list[str] | None", "doc": "List of file extensions or types of files to be uploaded (e.g. ['image', '.json', '.mp4']). \"file\" allows any file to be uploaded, \"image\" allows only image files to be uploaded, \"audio\" allows only audio files to be uploaded, \"video\" allows only video files to be uploaded, \"text\" allows only text files to be uploaded.", "default": "None"}, {"name": "type", "annotation": "Literal['file', 'binary']", "doc": "Type of value to be returned by component. \"file\" returns a temporary file object with the same base name as the uploaded file, whose full path can be retrieved by file_obj.name, \"binary\" returns a bytes object.", "default": "\"file\""}, {"name": "label", "annotation": "str | None", "doc": "component name in interface.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", "default": "None"}, {"name": "show_label", "annotation": "bool | None", "doc": "if True, will display label.", "default": "None"}, {"name": "container", "annotation": "bool", "doc": "If True, will place the component in a container - providing some extra padding around the border.", "default": "True"}, {"name": "scale", "annotation": "int | None", "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. 
Should be an integer.", "default": "None"}, {"name": "min_width", "annotation": "int", "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", "default": "160"}, {"name": "interactive", "annotation": "bool | None", "doc": "if True, will allow users to upload a file; if False, can only be used to display files. If not provided, this is inferred based on whether the component is used as an input or output.", "default": "None"}, {"name": "visible", "annotation": "bool", "doc": "If False, component will be hidden.", "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "elem_classes", "annotation": "list[str] | str | None", "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}], "returns": {"annotation": null}, "example": null, "fns": [{"fn": null, "name": "change", "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. 
If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.File"}, {"fn": null, "name": "clear", "description": "This listener is triggered when the user clears the component (e.g. image or audio) using the X button for the component. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.File"}, {"fn": null, "name": "upload", "description": "This listener is triggered when the user uploads a file into the component (e.g. when the user uploads a video into a video component). This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. 
If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.File"}, {"fn": null, "name": "select", "description": "Event listener for when the user selects file from list. Uses event data gradio.SelectData to carry `value` referring to name of selected file, and `index` to refer to index. 
See EventData documentation on how to use this event data.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. 
Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.File"}], "string_shortcuts": [["File", "file", "Uses default values"], ["Files", "files", "Uses file_count=\"multiple\""]], "demos": [["zip_to_json", "from zipfile import ZipFile\n\nimport gradio as gr\n\n\ndef zip_to_json(file_obj):\n files = []\n with ZipFile(file_obj.name) as zfile:\n for zinfo in zfile.infolist():\n files.append(\n {\n \"name\": zinfo.filename,\n \"file_size\": zinfo.file_size,\n \"compressed_size\": zinfo.compress_size,\n }\n )\n return files\n\n\ndemo = gr.Interface(zip_to_json, \"file\", \"json\")\n\nif __name__ == \"__main__\":\n demo.launch()\n"], ["zip_files", "import os\nfrom zipfile import ZipFile\n\nimport gradio as gr\n\n\ndef zip_files(files):\n with ZipFile(\"tmp.zip\", \"w\") as zipObj:\n for idx, file in enumerate(files):\n zipObj.write(file.name, file.name.split(\"/\")[-1])\n return \"tmp.zip\"\n\ndemo = gr.Interface(\n zip_files,\n gr.File(file_count=\"multiple\", file_types=[\"text\", \".json\", \".csv\"]),\n \"file\",\n examples=[[[os.path.join(os.path.dirname(__file__),\"files/titanic.csv\"), \n os.path.join(os.path.dirname(__file__),\"files/titanic.csv\"), \n os.path.join(os.path.dirname(__file__),\"files/titanic.csv\")]]], \n cache_examples=True\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n"]], "preprocessing": "passes the uploaded file as a tempfile._TemporaryFileWrapper or List[tempfile._TemporaryFileWrapper] depending on `file_count` (or a bytes/Listbytes depending on `type`)", "postprocessing": "expects function to return a str path to a file, or List[str] consisting of paths to files.", "examples-format": "a str path to a local file that populates the component.", "parent": "gradio", "prev_obj": "DuplicateButton", "next_obj": "Gallery"}, "gallery": {"class": null, "name": "Gallery", "description": "Used to display a list of images as a gallery that can be scrolled through.
", "tags": {"preprocessing": "this component does *not* accept input.", "postprocessing": "expects a list of images in any format, {List[numpy.array | PIL.Image | str | pathlib.Path]}, or a {List} of (image, {str} caption) tuples and displays them.", "demos": "fake_gan"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "value", "annotation": "list[np.ndarray | _Image.Image | str | Path | tuple] | Callable | None", "doc": "List of images to display in the gallery by default. If callable, the function will be called whenever the app loads to set the initial value of the component.", "default": "None"}, {"name": "label", "annotation": "str | None", "doc": "component name in interface.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", "default": "None"}, {"name": "show_label", "annotation": "bool | None", "doc": "if True, will display label.", "default": "None"}, {"name": "container", "annotation": "bool", "doc": "If True, will place the component in a container - providing some extra padding around the border.", "default": "True"}, {"name": "scale", "annotation": "int | None", "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", "default": "None"}, {"name": "min_width", "annotation": "int", "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", "default": "160"}, {"name": "visible", "annotation": "bool", "doc": "If False, component will be hidden.", "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "elem_classes", "annotation": "list[str] | str | None", "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "columns", "annotation": "int | tuple | None", "doc": "Represents the number of images that should be shown in one row, for each of the six standard screen sizes (<576px, <768px, <992px, <1200px, <1400px, >1400px). if fewer that 6 are given then the last will be used for all subsequent breakpoints", "default": "2"}, {"name": "rows", "annotation": "int | tuple | None", "doc": "Represents the number of rows in the image grid, for each of the six standard screen sizes (<576px, <768px, <992px, <1200px, <1400px, >1400px). 
If fewer than 6 are given, then the last will be used for all subsequent breakpoints", "default": "None"}, {"name": "height", "annotation": "str | None", "doc": "Height of the gallery.", "default": "None"}, {"name": "preview", "annotation": "bool | None", "doc": "If True, will display the Gallery in preview mode, which shows all of the images as thumbnails and allows the user to click on them to view them in full size.", "default": "None"}, {"name": "object_fit", "annotation": "Literal['contain', 'cover', 'fill', 'none', 'scale-down'] | None", "doc": "CSS object-fit property for the thumbnail images in the gallery. Can be \"contain\", \"cover\", \"fill\", \"none\", or \"scale-down\".", "default": "None"}, {"name": "allow_preview", "annotation": "bool", "doc": "If True, images in the gallery will be enlarged when they are clicked. Default is True.", "default": "True"}, {"name": "show_share_button", "annotation": "bool | None", "doc": "If True, will show a share icon in the corner of the component that allows user to share outputs to Hugging Face Spaces Discussions. If False, icon does not appear. If set to None (default behavior), then the icon appears if this Gradio app is launched on Spaces, but not otherwise.", "default": "None"}, {"name": "show_download_button", "annotation": "bool | None", "doc": "If True, will show a download button in the corner of the selected image. If False, the icon does not appear. Default is True.", "default": "True"}], "returns": {"annotation": null}, "example": null, "fns": [{"fn": null, "name": "select", "description": "Event listener for when the user selects image within Gallery. Uses event data gradio.SelectData to carry `value` referring to caption of selected image, and `index` to refer to index. See EventData documentation on how to use this event data.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Gallery"}], "string_shortcuts": [["Gallery", "gallery", "Uses default values"]], "demos": [["fake_gan", "# This demo needs to be run from the repo folder.\n# python demo/fake_gan/run.py\nimport random\n\nimport gradio as gr\n\n\ndef fake_gan():\n images = [\n (random.choice(\n [\n \"https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n \"https://images.unsplash.com/photo-1554151228-14d9def656e4?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=386&q=80\",\n \"https://images.unsplash.com/photo-1542909168-82c3e7fdca5c?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxzZWFyY2h8MXx8aHVtYW4lMjBmYWNlfGVufDB8fDB8fA%3D%3D&w=1000&q=80\",\n \"https://images.unsplash.com/photo-1546456073-92b9f0a8d413?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n \"https://images.unsplash.com/photo-1601412436009-d964bd02edbc?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=464&q=80\",\n ]\n ), f\"label {i}\" if i != 0 else \"label\" * 50)\n for i in range(3)\n ]\n return images\n\n\nwith gr.Blocks() as demo:\n with gr.Column(variant=\"panel\"):\n with gr.Row(variant=\"compact\"):\n text = gr.Textbox(\n label=\"Enter your prompt\",\n show_label=False,\n max_lines=1,\n placeholder=\"Enter your prompt\",\n ).style(\n container=False,\n )\n btn = gr.Button(\"Generate image\").style(full_width=False)\n\n gallery = gr.Gallery(\n label=\"Generated images\", show_label=False, elem_id=\"gallery\"\n ).style(columns=[2], rows=[2], object_fit=\"contain\", height=\"auto\")\n\n btn.click(fake_gan, None, gallery)\n\nif __name__ == \"__main__\":\n demo.launch()\n"]], "preprocessing": "this component does *not* accept input.", "postprocessing": "expects a list of images in any format, List[numpy.array | PIL.Image | str | pathlib.Path], or a List of (image, str caption) tuples and displays them.", "parent": "gradio", "prev_obj": "File", "next_obj": "HTML"}, "html": {"class": null, "name": "HTML", "description": "Used to display arbitrary HTML output.
", "tags": {"preprocessing": "this component does *not* accept input.", "postprocessing": "expects a valid HTML {str}.", "demos": "text_analysis", "guides": "key-features"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "value", "annotation": "str | Callable", "doc": "Default value. If callable, the function will be called whenever the app loads to set the initial value of the component.", "default": "\"\""}, {"name": "label", "annotation": "str | None", "doc": "component name in interface.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", "default": "None"}, {"name": "show_label", "annotation": "bool | None", "doc": "if True, will display label.", "default": "None"}, {"name": "visible", "annotation": "bool", "doc": "If False, component will be hidden.", "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "elem_classes", "annotation": "list[str] | str | None", "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}], "returns": {"annotation": null}, "example": null, "fns": [{"fn": null, "name": "change", "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.HTML"}], "string_shortcuts": [["HTML", "html", "Uses default values"]], "demos": [["text_analysis", "import gradio as gr\nimport os\nos.system('python -m spacy download en_core_web_sm')\nimport spacy\nfrom spacy import displacy\n\nnlp = spacy.load(\"en_core_web_sm\")\n\ndef text_analysis(text):\n doc = nlp(text)\n html = displacy.render(doc, style=\"dep\", page=True)\n html = (\n \"
\"\n + html\n + \"
\"\n )\n pos_count = {\n \"char_count\": len(text),\n \"token_count\": 0,\n }\n pos_tokens = []\n\n for token in doc:\n pos_tokens.extend([(token.text, token.pos_), (\" \", None)])\n\n return pos_tokens, pos_count, html\n\ndemo = gr.Interface(\n text_analysis,\n gr.Textbox(placeholder=\"Enter sentence here...\"),\n [\"highlight\", \"json\", \"html\"],\n examples=[\n [\"What a beautiful morning for a walk!\"],\n [\"It was the best of times, it was the worst of times.\"],\n ],\n)\n\ndemo.launch()\n"]], "guides": [{"name": "key-features", "category": "getting-started", "pretty_category": "Getting Started", "guide_index": 2, "absolute_index": 1, "pretty_name": "Key Features", "content": "# Key Features\n\nLet's go through some of the most popular features of Gradio! Here are Gradio's key features:\n\n1. [Adding example inputs](#example-inputs)\n2. [Passing custom error messages](#alerts)\n3. [Adding descriptive content](#descriptive-content)\n4. [Setting up flagging](#flagging)\n5. [Preprocessing and postprocessing](#preprocessing-and-postprocessing)\n6. [Styling demos](#styling)\n7. [Queuing users](#queuing)\n8. [Iterative outputs](#iterative-outputs)\n9. [Progress bars](#progress-bars)\n10. [Batch functions](#batch-functions)\n11. [Running on collaborative notebooks](#colab-notebooks)\n\n## Example Inputs\n\nYou can provide example data that a user can easily load into `Interface`. This can be helpful to demonstrate the types of inputs the model expects, as well as to provide a way to explore your dataset in conjunction with your model. To load example data, you can provide a **nested list** to the `examples=` keyword argument of the Interface constructor. Each sublist within the outer list represents a data sample, and each element within the sublist represents an input for each input component. The format of example data for each component is specified in the [Docs](https://gradio.app/docs#components).\n\n```python\nimport gradio as gr\n\ndef calculator(num1, operation, num2):\n if operation == \"add\":\n return num1 + num2\n elif operation == \"subtract\":\n return num1 - num2\n elif operation == \"multiply\":\n return num1 * num2\n elif operation == \"divide\":\n if num2 == 0:\n raise gr.Error(\"Cannot divide by zero!\")\n return num1 / num2\n\ndemo = gr.Interface(\n calculator,\n [\n \"number\", \n gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n \"number\"\n ],\n \"number\",\n examples=[\n [5, \"add\", 3],\n [4, \"divide\", 2],\n [-4, \"multiply\", 2.5],\n [0, \"subtract\", 1.2],\n ],\n title=\"Toy Calculator\",\n description=\"Here's a sample toy calculator. Allows you to calculate things like $2+2=4$\",\n)\ndemo.launch()\n\n```\n\n\nYou can load a large dataset into the examples to browse and interact with the dataset through Gradio. The examples will be automatically paginated (you can configure this through the `examples_per_page` argument of `Interface`).\n\nContinue learning about examples in the [More On Examples](https://gradio.app/more-on-examples) guide.\n\n## Alerts\n\nYou wish to pass custom error messages to the user. To do so, raise a `gr.Error(\"custom message\")` to display an error message. If you try to divide by zero in the calculator demo above, a popup modal will display the custom error message. Learn more about Error in the [docs](https://gradio.app/docs#error). 
\n\nYou can also issue `gr.Warning(\"message\")` and `gr.Info(\"message\")` by having them as standalone lines in your function, which will immediately display modals while continuing the execution of your function. Queueing needs to be enabled for this to work. \n\nNote below how the `gr.Error` has to be raised, while the `gr.Warning` and `gr.Info` are single lines.\n\n```python\ndef start_process(name):\n gr.Info(\"Starting process\")\n if name is None:\n gr.Warning(\"Name is empty\")\n ...\n if success == False:\n raise gr.Error(\"Process failed\")\n```\n \n## Descriptive Content\n\nIn the previous example, you may have noticed the `title=` and `description=` keyword arguments in the `Interface` constructor that helps users understand your app.\n\nThere are three arguments in the `Interface` constructor to specify where this content should go:\n\n* `title`: which accepts text and can display it at the very top of interface, and also becomes the page title.\n* `description`: which accepts text, markdown or HTML and places it right under the title.\n* `article`: which also accepts text, markdown or HTML and places it below the interface.\n\n![annotated](https://github.com/gradio-app/gradio/blob/main/guides/assets/annotated.png?raw=true)\n\nIf you're using the `Blocks` API instead, you can insert text, markdown, or HTML anywhere using the `gr.Markdown(...)` or `gr.HTML(...)` components, with descriptive content inside the `Component` constructor.\n\nAnother useful keyword argument is `label=`, which is present in every `Component`. This modifies the label text at the top of each `Component`. You can also add the `info=` keyword argument to form elements like `Textbox` or `Radio` to provide further information on their usage.\n\n```python\ngr.Number(label='Age', info='In years, must be greater than 0')\n```\n\n## Flagging\n\nBy default, an `Interface` will have \"Flag\" button. When a user testing your `Interface` sees input with interesting output, such as erroneous or unexpected model behaviour, they can flag the input for you to review. Within the directory provided by the `flagging_dir=` argument to the `Interface` constructor, a CSV file will log the flagged inputs. If the interface involves file data, such as for Image and Audio components, folders will be created to store those flagged data as well.\n\nFor example, with the calculator interface shown above, we would have the flagged data stored in the flagged directory shown below:\n\n```directory\n+-- calculator.py\n+-- flagged/\n| +-- logs.csv\n```\n\n*flagged/logs.csv*\n\n```csv\nnum1,operation,num2,Output\n5,add,7,12\n6,subtract,1.5,4.5\n```\n\nWith the sepia interface shown earlier, we would have the flagged data stored in the flagged directory shown below:\n\n```directory\n+-- sepia.py\n+-- flagged/\n| +-- logs.csv\n| +-- im/\n| | +-- 0.png\n| | +-- 1.png\n| +-- Output/\n| | +-- 0.png\n| | +-- 1.png\n```\n\n*flagged/logs.csv*\n\n```csv\nim,Output\nim/0.png,Output/0.png\nim/1.png,Output/1.png\n```\n\nIf you wish for the user to provide a reason for flagging, you can pass a list of strings to the `flagging_options` argument of Interface. Users will have to select one of the strings when flagging, which will be saved as an additional column to the CSV.\n\n## Preprocessing and Postprocessing\n\n![](https://github.com/gradio-app/gradio/blob/main/js/_website/src/assets/img/dataflow.svg?raw=true)\n\nAs you've seen, Gradio includes components that can handle a variety of different data types, such as images, audio, and video. 
Most components can be used both as inputs or outputs.\n\nWhen a component is used as an input, Gradio automatically handles the *preprocessing* needed to convert the data from a type sent by the user's browser (such as a base64 representation of a webcam snapshot) to a form that can be accepted by your function (such as a `numpy` array).\n\nSimilarly, when a component is used as an output, Gradio automatically handles the *postprocessing* needed to convert the data from what is returned by your function (such as a list of image paths) to a form that can be displayed in the user's browser (such as a `Gallery` of images in base64 format).\n\nYou can control the *preprocessing* using the parameters when constructing the image component. For example, here if you instantiate the `Image` component with the following parameters, it will convert the image to the `PIL` type and reshape it to be `(100, 100)` no matter the original size that it was submitted as:\n\n```py\nimg = gr.Image(shape=(100, 100), type=\"pil\")\n```\n\nIn contrast, here we keep the original size of the image, but invert the colors before converting it to a numpy array:\n\n```py\nimg = gr.Image(invert_colors=True, type=\"numpy\")\n```\n\nPostprocessing is a lot easier! Gradio automatically recognizes the format of the returned data (e.g. is the `Image` a `numpy` array or a `str` filepath?) and postprocesses it into a format that can be displayed by the browser.\n\nTake a look at the [Docs](https://gradio.app/docs) to see all the preprocessing-related parameters for each Component.\n\n## Styling\n\nGradio themes are the easiest way to customize the look and feel of your app. You can choose from a variety of themes, or create your own. To do so, pass the `theme=` kwarg to the `Interface` constructor. For example:\n\n```python\ndemo = gr.Interface(..., theme=gr.themes.Monochrome())\n```\n\nGradio comes with a set of prebuilt themes which you can load from `gr.themes.*`. You can extend these themes or create your own themes from scratch - see the [Theming guide](https://gradio.app/guides/theming-guide) for more details.\n\nFor additional styling ability, you can pass any CSS to your app using the `css=` kwarg.\nThe base class for the Gradio app is `gradio-container`, so here's an example that changes the background color of the Gradio app:\n\n```python\nwith gr.Interface(css=\".gradio-container {background-color: red}\") as demo:\n ...\n```\n\nSome components can be additionally styled through the `style()` method. For example:\n\n```python\nimg = gr.Image(\"lion.jpg\").style(height='24', rounded=False)\n```\n\nTake a look at the [Docs](https://gradio.app/docs) to see all the styling options for each Component.\n\n## Queuing\n\nIf your app expects heavy traffic, use the `queue()` method to control processing rate. This will queue up calls so only a certain number of requests are processed at a single time. 
Queueing uses websockets, which also prevent network timeouts, so you should use queueing if the inference time of your function is long (> 1min).\n\nWith `Interface`:\n\n```python\ndemo = gr.Interface(...).queue()\ndemo.launch()\n```\n\nWith `Blocks`:\n\n```python\nwith gr.Blocks() as demo:\n #...\ndemo.queue()\ndemo.launch()\n```\n\nYou can control the number of requests processed at a single time as such:\n\n```python\ndemo.queue(concurrency_count=3)\n```\n\nSee the [Docs on queueing](/docs/#queue) on configuring other queuing parameters.\n\nTo specify only certain functions for queueing in Blocks:\n\n```python\nwith gr.Blocks() as demo2:\n num1 = gr.Number()\n num2 = gr.Number()\n output = gr.Number()\n gr.Button(\"Add\").click(\n lambda a, b: a + b, [num1, num2], output)\n gr.Button(\"Multiply\").click(\n lambda a, b: a * b, [num1, num2], output, queue=True)\ndemo2.launch()\n```\n\n## Iterative Outputs\n\nIn some cases, you may want to stream a sequence of outputs rather than show a single output at once. For example, you might have an image generation model and you want to show the image that is generated at each step, leading up to the final image. Or you might have a chatbot which streams its response one word at a time instead of returning it all at once.\n\nIn such cases, you can supply a **generator** function into Gradio instead of a regular function. Creating generators in Python is very simple: instead of a single `return` value, a function should `yield` a series of values instead. Usually the `yield` statement is put in some kind of loop. Here's an example of an generator that simply counts up to a given number:\n\n```python\ndef my_generator(x):\n for i in range(x):\n yield i\n```\n\nYou supply a generator into Gradio the same way as you would a regular function. For example, here's a a (fake) image generation model that generates noise for several steps before outputting an image:\n\n```python\nimport gradio as gr\nimport numpy as np\nimport time\n\n# define core fn, which returns a generator {steps} times before returning the image\ndef fake_diffusion(steps):\n for _ in range(steps):\n time.sleep(1)\n image = np.random.random((600, 600, 3))\n yield image\n image = \"https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg\"\n yield image\n\n\ndemo = gr.Interface(fake_diffusion, inputs=gr.Slider(1, 10, 3), outputs=\"image\")\n\n# define queue - required for generators\ndemo.queue()\n\ndemo.launch()\n\n```\n\n\nNote that we've added a `time.sleep(1)` in the iterator to create an artificial pause between steps so that you are able to observe the steps of the iterator (in a real image generation model, this probably wouldn't be necessary).\n\nSupplying a generator into Gradio **requires** you to enable queuing in the underlying Interface or Blocks (see the queuing section above).\n\n## Progress Bars\n\nGradio supports the ability to create a custom Progress Bars so that you have customizability and control over the progress update that you show to the user. In order to enable this, simply add an argument to your method that has a default value of a `gr.Progress` instance. Then you can update the progress levels by calling this instance directly with a float between 0 and 1, or using the `tqdm()` method of the `Progress` instance to track progress over an iterable, as shown below. 
Queueing must be enabled for progress updates.\n\n```python\nimport gradio as gr\nimport time\n\ndef slowly_reverse(word, progress=gr.Progress()):\n progress(0, desc=\"Starting\")\n time.sleep(1)\n progress(0.05)\n new_string = \"\"\n for letter in progress.tqdm(word, desc=\"Reversing\"):\n time.sleep(0.25)\n new_string = letter + new_string\n return new_string\n\ndemo = gr.Interface(slowly_reverse, gr.Text(), gr.Text())\n\nif __name__ == \"__main__\":\n demo.queue(concurrency_count=10).launch()\n\n```\n\n\nIf you use the `tqdm` library, you can even report progress updates automatically from any `tqdm.tqdm` that already exists within your function by setting the default argument as `gr.Progress(track_tqdm=True)`!\n\n## Batch Functions\n\nGradio supports the ability to pass *batch* functions. Batch functions are just\nfunctions which take in a list of inputs and return a list of predictions.\n\nFor example, here is a batched function that takes in two lists of inputs (a list of\nwords and a list of ints), and returns a list of trimmed words as output:\n\n```py\nimport time\n\ndef trim_words(words, lens):\n trimmed_words = []\n time.sleep(5)\n for w, l in zip(words, lens):\n trimmed_words.append(w[:int(l)]) \n return [trimmed_words]\n```\n\nThe advantage of using batched functions is that if you enable queuing, the Gradio\nserver can automatically *batch* incoming requests and process them in parallel,\npotentially speeding up your demo. Here's what the Gradio code looks like (notice\nthe `batch=True` and `max_batch_size=16` -- both of these parameters can be passed\ninto event triggers or into the `Interface` class)\n\nWith `Interface`:\n\n```python\ndemo = gr.Interface(trim_words, [\"textbox\", \"number\"], [\"output\"], \n batch=True, max_batch_size=16)\ndemo.queue()\ndemo.launch()\n```\n\nWith `Blocks`:\n\n```py\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n with gr.Row():\n word = gr.Textbox(label=\"word\")\n leng = gr.Number(label=\"leng\")\n output = gr.Textbox(label=\"Output\")\n with gr.Row():\n run = gr.Button()\n\n event = run.click(trim_words, [word, leng], output, batch=True, max_batch_size=16)\n\ndemo.queue()\ndemo.launch()\n```\n\nIn the example above, 16 requests could be processed in parallel (for a total inference\ntime of 5 seconds), instead of each request being processed separately (for a total\ninference time of 80 seconds). Many Hugging Face `transformers` and `diffusers` models\nwork very naturally with Gradio's batch mode: here's [an example demo using diffusers to\ngenerate images in batches](https://github.com/gradio-app/gradio/blob/main/demo/diffusers_with_batching/run.py)\n\nNote: using batch functions with Gradio **requires** you to enable queuing in the underlying Interface or Blocks (see the queuing section above).\n\n\n## Colab Notebooks\n\n\nGradio is able to run anywhere you run Python, including local jupyter notebooks as well as collaborative notebooks, such as [Google Colab](https://colab.research.google.com/). In the case of local jupyter notebooks and Google Colab notbooks, Gradio runs on a local server which you can interact with in your browser. (Note: for Google Colab, this is accomplished by [service worker tunneling](https://github.com/tensorflow/tensorboard/blob/master/docs/design/colab_integration.md), which requires cookies to be enabled in your browser.) 
For other remote notebooks, Gradio will also run on a server, but you will need to use [SSH tunneling](https://coderwall.com/p/ohk6cg/remote-access-to-ipython-notebooks-via-ssh) to view the app in your local browser. Often a simpler option is to use Gradio's built-in public links, [discussed in the next Guide](https://gradio.app/guides/sharing-your-app/#sharing-demos). ", "html": "

Key Features

\n\n

Let's go through some of the most popular features of Gradio! Here are Gradio's key features:

\n\n
  1. Adding example inputs
  2. Passing custom error messages
  3. Adding descriptive content
  4. Setting up flagging
  5. Preprocessing and postprocessing
  6. Styling demos
  7. Queuing users
  8. Iterative outputs
  9. Progress bars
  10. Batch functions
  11. Running on collaborative notebooks
\n\n

Example Inputs

\n\n

You can provide example data that a user can easily load into Interface. This can be helpful to demonstrate the types of inputs the model expects, as well as to provide a way to explore your dataset in conjunction with your model. To load example data, you can provide a nested list to the examples= keyword argument of the Interface constructor. Each sublist within the outer list represents a data sample, and each element within the sublist represents an input for each input component. The format of example data for each component is specified in the Docs.

\n\n
import gradio as gr\n\ndef calculator(num1, operation, num2):\n    if operation == \"add\":\n        return num1 + num2\n    elif operation == \"subtract\":\n        return num1 - num2\n    elif operation == \"multiply\":\n        return num1 * num2\n    elif operation == \"divide\":\n        if num2 == 0:\n            raise gr.Error(\"Cannot divide by zero!\")\n        return num1 / num2\n\ndemo = gr.Interface(\n    calculator,\n    [\n        \"number\", \n        gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n        \"number\"\n    ],\n    \"number\",\n    examples=[\n        [5, \"add\", 3],\n        [4, \"divide\", 2],\n        [-4, \"multiply\", 2.5],\n        [0, \"subtract\", 1.2],\n    ],\n    title=\"Toy Calculator\",\n    description=\"Here's a sample toy calculator. Allows you to calculate things like $2+2=4$\",\n)\ndemo.launch()\n\n
\n\n

\n\n

You can load a large dataset into the examples to browse and interact with the dataset through Gradio. The examples will be automatically paginated (you can configure this through the examples_per_page argument of Interface).

\n\n

Continue learning about examples in the More On Examples guide.

\n\n

Alerts

\n\n

You may wish to pass custom error messages to the user. To do so, raise a gr.Error(\"custom message\") to display an error message. If you try to divide by zero in the calculator demo above, a popup modal will display the custom error message. Learn more about Error in the docs.

\n\n

You can also issue gr.Warning(\"message\") and gr.Info(\"message\") by having them as standalone lines in your function, which will immediately display modals while continuing the execution of your function. Queueing needs to be enabled for this to work.

\n\n

Note below how the gr.Error has to be raised, while the gr.Warning and gr.Info are single lines.

\n\n
def start_process(name):\n    gr.Info(\"Starting process\")\n    if name is None:\n        gr.Warning(\"Name is empty\")\n    ...\n    if success == False:\n        raise gr.Error(\"Process failed\")\n
\n\n

Descriptive Content

\n\n

In the previous example, you may have noticed the title= and description= keyword arguments in the Interface constructor that help users understand your app.

\n\n

There are three arguments in the Interface constructor to specify where this content should go:

\n\n
  • title: which accepts text and can display it at the very top of interface, and also becomes the page title.
  • description: which accepts text, markdown or HTML and places it right under the title.
  • article: which also accepts text, markdown or HTML and places it below the interface.
\n\n

\"annotated\"

\n\n

If you're using the Blocks API instead, you can insert text, markdown, or HTML anywhere using the gr.Markdown(...) or gr.HTML(...) components, with descriptive content inside the Component constructor.

\n\n
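For instance, here is a minimal sketch (the text and component choices are just illustrative, not part of the guide's own demos) of mixing gr.Markdown and gr.HTML into a Blocks layout:

```python
import gradio as gr

# Illustrative sketch only: descriptive content can be placed anywhere in a Blocks layout.
with gr.Blocks() as demo:
    gr.Markdown("# Greeting Demo\nType a name and press **Run**.")
    gr.HTML("<p style='color: gray;'>This paragraph is rendered as raw HTML.</p>")
    name = gr.Textbox(label="Name")
    out = gr.Textbox(label="Greeting")
    gr.Button("Run").click(lambda n: f"Hello {n}!", name, out)

demo.launch()
```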

Another useful keyword argument is label=, which is present in every Component. This modifies the label text at the top of each Component. You can also add the info= keyword argument to form elements like Textbox or Radio to provide further information on their usage.

\n\n
gr.Number(label='Age', info='In years, must be greater than 0')\n
\n\n

Flagging

\n\n

By default, an Interface will have a \"Flag\" button. When a user testing your Interface sees input with interesting output, such as erroneous or unexpected model behaviour, they can flag the input for you to review. Within the directory provided by the flagging_dir= argument to the Interface constructor, a CSV file will log the flagged inputs. If the interface involves file data, such as for Image and Audio components, folders will be created to store the flagged data as well.

\n\n

For example, with the calculator interface shown above, we would have the flagged data stored in the flagged directory shown below:

\n\n
+-- calculator.py\n+-- flagged/\n|   +-- logs.csv\n
\n\n

flagged/logs.csv

\n\n
num1,operation,num2,Output\n5,add,7,12\n6,subtract,1.5,4.5\n
\n\n

With the sepia interface shown earlier, we would have the flagged data stored in the flagged directory shown below:

\n\n
+-- sepia.py\n+-- flagged/\n|   +-- logs.csv\n|   +-- im/\n|   |   +-- 0.png\n|   |   +-- 1.png\n|   +-- Output/\n|   |   +-- 0.png\n|   |   +-- 1.png\n
\n\n

flagged/logs.csv

\n\n
im,Output\nim/0.png,Output/0.png\nim/1.png,Output/1.png\n
\n\n

If you wish for the user to provide a reason for flagging, you can pass a list of strings to the flagging_options argument of Interface. Users will have to select one of the strings when flagging, which will be saved as an additional column to the CSV.

\n\n
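As a rough sketch (the reason strings below are placeholders, not values from this guide), flagging options can be added to the calculator interface above like this:

```python
import gradio as gr

def calculator(num1, operation, num2):
    if operation == "add":
        return num1 + num2
    elif operation == "subtract":
        return num1 - num2
    elif operation == "multiply":
        return num1 * num2
    elif operation == "divide":
        if num2 == 0:
            raise gr.Error("Cannot divide by zero!")
        return num1 / num2

# The reason selected by the user is saved as an extra column in the flagged CSV.
demo = gr.Interface(
    calculator,
    ["number", gr.Radio(["add", "subtract", "multiply", "divide"]), "number"],
    "number",
    flagging_options=["wrong result", "confusing", "other"],
)
demo.launch()
```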

Preprocessing and Postprocessing

\n\n

\"\"

\n\n

As you've seen, Gradio includes components that can handle a variety of different data types, such as images, audio, and video. Most components can be used both as inputs or outputs.

\n\n

When a component is used as an input, Gradio automatically handles the preprocessing needed to convert the data from a type sent by the user's browser (such as a base64 representation of a webcam snapshot) to a form that can be accepted by your function (such as a numpy array).

\n\n

Similarly, when a component is used as an output, Gradio automatically handles the postprocessing needed to convert the data from what is returned by your function (such as a list of image paths) to a form that can be displayed in the user's browser (such as a Gallery of images in base64 format).

\n\n

You can control the preprocessing using the parameters when constructing the image component. For example, if you instantiate the Image component with the following parameters, it will convert the image to the PIL type and reshape it to be (100, 100), no matter what size the image was originally submitted as:

\n\n
img = gr.Image(shape=(100, 100), type=\"pil\")\n
\n\n

In contrast, here we keep the original size of the image, but invert the colors before converting it to a numpy array:

\n\n
img = gr.Image(invert_colors=True, type=\"numpy\")\n
\n\n

Postprocessing is a lot easier! Gradio automatically recognizes the format of the returned data (e.g. is the Image a numpy array or a str filepath?) and postprocesses it into a format that can be displayed by the browser.

\n\n
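As a small sketch of this flexibility, the same Image output below accepts either a numpy array or a str filepath/URL return value (the URL is the one used by the fake diffusion demo later in this guide):

```python
import gradio as gr
import numpy as np

def as_array():
    # Returning a numpy array: Gradio encodes it for the browser automatically.
    return np.random.random((100, 100, 3))

def as_filepath():
    # Returning a str filepath or URL works with the same output component.
    return "https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg"

with gr.Blocks() as demo:
    img = gr.Image(label="Output")
    gr.Button("Random noise").click(as_array, None, img)
    gr.Button("From URL").click(as_filepath, None, img)

demo.launch()
```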

Take a look at the Docs to see all the preprocessing-related parameters for each Component.

\n\n

Styling

\n\n

Gradio themes are the easiest way to customize the look and feel of your app. You can choose from a variety of themes, or create your own. To do so, pass the theme= kwarg to the Interface constructor. For example:

\n\n
demo = gr.Interface(..., theme=gr.themes.Monochrome())\n
\n\n

Gradio comes with a set of prebuilt themes which you can load from gr.themes.*. You can extend these themes or create your own themes from scratch - see the Theming guide for more details.

\n\n
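For example, here is a rough sketch of extending a prebuilt theme (the hue and variable name below are illustrative; the Theming guide lists the full set of theme variables):

```python
import gradio as gr

# Start from a prebuilt theme and override a couple of its variables.
theme = gr.themes.Soft(primary_hue="emerald").set(
    button_primary_background_fill="*primary_600",
)

demo = gr.Interface(lambda name: f"Hello {name}!", "text", "text", theme=theme)
demo.launch()
```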

For additional styling ability, you can pass any CSS to your app using the css= kwarg.\nThe base class for the Gradio app is gradio-container, so here's an example that changes the background color of the Gradio app:

\n\n
with gr.Interface(css=\".gradio-container {background-color: red}\") as demo:\n    ...\n
\n\n

Some components can be additionally styled through the style() method. For example:

\n\n
img = gr.Image(\"lion.jpg\").style(height='24', rounded=False)\n
\n\n

Take a look at the Docs to see all the styling options for each Component.

\n\n

Queuing

\n\n

If your app expects heavy traffic, use the queue() method to control processing rate. This will queue up calls so only a certain number of requests are processed at a single time. Queueing uses websockets, which also prevent network timeouts, so you should use queueing if the inference time of your function is long (> 1min).

\n\n

With Interface:

\n\n
demo = gr.Interface(...).queue()\ndemo.launch()\n
\n\n

With Blocks:

\n\n
with gr.Blocks() as demo:\n    #...\ndemo.queue()\ndemo.launch()\n
\n\n

You can control the number of requests processed at a single time as follows:

\n\n
demo.queue(concurrency_count=3)\n
\n\n

See the Docs on queueing for configuring other queuing parameters.

\n\n
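For instance (the values below are arbitrary), other settings such as the maximum queue size can be passed in the same call:

```python
demo.queue(concurrency_count=3, max_size=20)
```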

To specify only certain functions for queueing in Blocks:

\n\n
with gr.Blocks() as demo2:\n    num1 = gr.Number()\n    num2 = gr.Number()\n    output = gr.Number()\n    gr.Button(\"Add\").click(\n        lambda a, b: a + b, [num1, num2], output)\n    gr.Button(\"Multiply\").click(\n        lambda a, b: a * b, [num1, num2], output, queue=True)\ndemo2.launch()\n
\n\n

Iterative Outputs

\n\n

In some cases, you may want to stream a sequence of outputs rather than show a single output at once. For example, you might have an image generation model and you want to show the image that is generated at each step, leading up to the final image. Or you might have a chatbot which streams its response one word at a time instead of returning it all at once.

\n\n

In such cases, you can supply a generator function into Gradio instead of a regular function. Creating generators in Python is very simple: instead of a single return value, a function should yield a series of values. Usually the yield statement is put in some kind of loop. Here's an example of a generator that simply counts up to a given number:

\n\n
def my_generator(x):\n    for i in range(x):\n        yield i\n
\n\n

You supply a generator into Gradio the same way as you would a regular function. For example, here's a (fake) image generation model that generates noise for several steps before outputting an image:

\n\n
import gradio as gr\nimport numpy as np\nimport time\n\n# define core fn, which returns a generator {steps} times before returning the image\ndef fake_diffusion(steps):\n    for _ in range(steps):\n        time.sleep(1)\n        image = np.random.random((600, 600, 3))\n        yield image\n    image = \"https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg\"\n    yield image\n\n\ndemo = gr.Interface(fake_diffusion, inputs=gr.Slider(1, 10, 3), outputs=\"image\")\n\n# define queue - required for generators\ndemo.queue()\n\ndemo.launch()\n\n
\n\n

\n\n

Note that we've added a time.sleep(1) in the iterator to create an artificial pause between steps so that you are able to observe the steps of the iterator (in a real image generation model, this probably wouldn't be necessary).

\n\n

Supplying a generator into Gradio requires you to enable queuing in the underlying Interface or Blocks (see the queuing section above).

\n\n

Progress Bars

\n\n

Gradio supports the ability to create custom Progress Bars so that you have customizability and control over the progress updates that you show to the user. In order to enable this, simply add an argument to your method that has a default value of a gr.Progress instance. Then you can update the progress levels by calling this instance directly with a float between 0 and 1, or using the tqdm() method of the Progress instance to track progress over an iterable, as shown below. Queueing must be enabled for progress updates.

\n\n
import gradio as gr\nimport time\n\ndef slowly_reverse(word, progress=gr.Progress()):\n    progress(0, desc=\"Starting\")\n    time.sleep(1)\n    progress(0.05)\n    new_string = \"\"\n    for letter in progress.tqdm(word, desc=\"Reversing\"):\n        time.sleep(0.25)\n        new_string = letter + new_string\n    return new_string\n\ndemo = gr.Interface(slowly_reverse, gr.Text(), gr.Text())\n\nif __name__ == \"__main__\":\n    demo.queue(concurrency_count=10).launch()\n\n
\n\n

\n\n

If you use the tqdm library, you can even report progress updates automatically from any tqdm.tqdm that already exists within your function by setting the default argument as gr.Progress(track_tqdm=True)!

\n\n
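As a minimal sketch (assuming the tqdm package is installed), the reversal example above could rely entirely on its existing tqdm loop:

```python
import time

import gradio as gr
from tqdm import tqdm

# With track_tqdm=True, the tqdm loop below is mirrored in Gradio's progress bar
# without calling the progress instance manually.
def slowly_reverse(word, progress=gr.Progress(track_tqdm=True)):
    new_string = ""
    for letter in tqdm(word, desc="Reversing"):
        time.sleep(0.25)
        new_string = letter + new_string
    return new_string

demo = gr.Interface(slowly_reverse, gr.Text(), gr.Text())

if __name__ == "__main__":
    demo.queue().launch()
```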

Batch Functions

\n\n

Gradio supports the ability to pass batch functions. Batch functions are just\nfunctions which take in a list of inputs and return a list of predictions.

\n\n

For example, here is a batched function that takes in two lists of inputs (a list of\nwords and a list of ints), and returns a list of trimmed words as output:

\n\n
import time\n\ndef trim_words(words, lens):\n    trimmed_words = []\n    time.sleep(5)\n    for w, l in zip(words, lens):\n        trimmed_words.append(w[:int(l)])        \n    return [trimmed_words]\n
\n\n

The advantage of using batched functions is that if you enable queuing, the Gradio\nserver can automatically batch incoming requests and process them in parallel,\npotentially speeding up your demo. Here's what the Gradio code looks like (notice\nthe batch=True and max_batch_size=16 -- both of these parameters can be passed\ninto event triggers or into the Interface class)

\n\n

With Interface:

\n\n
demo = gr.Interface(trim_words, [\"textbox\", \"number\"], [\"output\"], \n                    batch=True, max_batch_size=16)\ndemo.queue()\ndemo.launch()\n
\n\n

With Blocks:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        word = gr.Textbox(label=\"word\")\n        leng = gr.Number(label=\"leng\")\n        output = gr.Textbox(label=\"Output\")\n    with gr.Row():\n        run = gr.Button()\n\n    event = run.click(trim_words, [word, leng], output, batch=True, max_batch_size=16)\n\ndemo.queue()\ndemo.launch()\n
\n\n

In the example above, 16 requests could be processed in parallel (for a total inference\ntime of 5 seconds), instead of each request being processed separately (for a total\ninference time of 80 seconds). Many Hugging Face transformers and diffusers models\nwork very naturally with Gradio's batch mode: here's an example demo using diffusers to\ngenerate images in batches

\n\n

Note: using batch functions with Gradio requires you to enable queuing in the underlying Interface or Blocks (see the queuing section above).

\n\n

Colab Notebooks

\n\n

Gradio is able to run anywhere you run Python, including local jupyter notebooks as well as collaborative notebooks, such as Google Colab. In the case of local jupyter notebooks and Google Colab notebooks, Gradio runs on a local server which you can interact with in your browser. (Note: for Google Colab, this is accomplished by service worker tunneling, which requires cookies to be enabled in your browser.) For other remote notebooks, Gradio will also run on a server, but you will need to use SSH tunneling to view the app in your local browser. Often a simpler option is to use Gradio's built-in public links, discussed in the next Guide.

\n", "tags": [], "spaces": [], "url": "/guides/key-features/", "contributor": null}], "preprocessing": "this component does *not* accept input.", "postprocessing": "expects a valid HTML str.", "parent": "gradio", "prev_obj": "Gallery", "next_obj": "HighlightedText"}, "highlightedtext": {"class": null, "name": "HighlightedText", "description": "Displays text that contains spans that are highlighted by category or numerical value.
", "tags": {"preprocessing": "this component does *not* accept input.", "postprocessing": "expects a {List[Tuple[str, float | str]]]} consisting of spans of text and their associated labels, or a {Dict} with two keys: (1) \"text\" whose value is the complete text, and (2) \"entities\", which is a list of dictionaries, each of which have the keys: \"entity\" (consisting of the entity label, can alternatively be called \"entity_group\"), \"start\" (the character index where the label starts), and \"end\" (the character index where the label ends). Entities should not overlap.", "demos": "diff_texts, text_analysis", "guides": "named-entity-recognition"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "value", "annotation": "list[tuple[str, str | float | None]] | dict | Callable | None", "doc": "Default value to show. If callable, the function will be called whenever the app loads to set the initial value of the component.", "default": "None"}, {"name": "color_map", "annotation": "dict[str, str] | None", "doc": null, "default": "None"}, {"name": "show_legend", "annotation": "bool", "doc": "whether to show span categories in a separate legend or inline.", "default": "False"}, {"name": "combine_adjacent", "annotation": "bool", "doc": "If True, will merge the labels of adjacent tokens belonging to the same category.", "default": "False"}, {"name": "adjacent_separator", "annotation": "str", "doc": "Specifies the separator to be used between tokens if combine_adjacent is True.", "default": "\"\""}, {"name": "label", "annotation": "str | None", "doc": "component name in interface.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", "default": "None"}, {"name": "show_label", "annotation": "bool | None", "doc": "if True, will display label.", "default": "None"}, {"name": "container", "annotation": "bool", "doc": "If True, will place the component in a container - providing some extra padding around the border.", "default": "True"}, {"name": "scale", "annotation": "int | None", "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", "default": "None"}, {"name": "min_width", "annotation": "int", "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", "default": "160"}, {"name": "visible", "annotation": "bool", "doc": "If False, component will be hidden.", "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "elem_classes", "annotation": "list[str] | str | None", "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}], "returns": {"annotation": null}, "example": null, "fns": [{"fn": null, "name": "change", "description": "This listener is triggered when the component's value changes either because of user input (e.g. 
a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. 
For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.HighlightedText"}, {"fn": null, "name": "select", "description": "Event listener for when the user selects Highlighted text span. Uses event data gradio.SelectData to carry `value` referring to selected [text, label] tuple, and `index` to refer to span index. See EventData documentation on how to use this event data.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.HighlightedText"}], "string_shortcuts": [["HighlightedText", "highlightedtext", "Uses default values"]], "demos": [["diff_texts", "from difflib import Differ\n\nimport gradio as gr\n\n\ndef diff_texts(text1, text2):\n d = Differ()\n return [\n (token[2:], token[0] if token[0] != \" \" else None)\n for token in d.compare(text1, text2)\n ]\n\n\ndemo = gr.Interface(\n diff_texts,\n [\n gr.Textbox(\n label=\"Text 1\",\n info=\"Initial text\",\n lines=3,\n value=\"The quick brown fox jumped over the lazy dogs.\",\n ),\n gr.Textbox(\n label=\"Text 2\",\n info=\"Text to compare\",\n lines=3,\n value=\"The fast brown fox jumps over lazy dogs.\",\n ),\n ],\n gr.HighlightedText(\n label=\"Diff\",\n combine_adjacent=True,\n show_legend=True,\n ).style(color_map={\"+\": \"red\", \"-\": \"green\"}),\n theme=gr.themes.Base()\n)\nif __name__ == \"__main__\":\n demo.launch()\n"], ["text_analysis", "import gradio as gr\nimport os\nos.system('python -m spacy download en_core_web_sm')\nimport spacy\nfrom spacy import displacy\n\nnlp = spacy.load(\"en_core_web_sm\")\n\ndef text_analysis(text):\n doc = nlp(text)\n html = displacy.render(doc, style=\"dep\", page=True)\n html = (\n \"
\"\n + html\n + \"
\"\n )\n pos_count = {\n \"char_count\": len(text),\n \"token_count\": 0,\n }\n pos_tokens = []\n\n for token in doc:\n pos_tokens.extend([(token.text, token.pos_), (\" \", None)])\n\n return pos_tokens, pos_count, html\n\ndemo = gr.Interface(\n text_analysis,\n gr.Textbox(placeholder=\"Enter sentence here...\"),\n [\"highlight\", \"json\", \"html\"],\n examples=[\n [\"What a beautiful morning for a walk!\"],\n [\"It was the best of times, it was the worst of times.\"],\n ],\n)\n\ndemo.launch()\n"]], "guides": [{"name": "named-entity-recognition", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 39, "pretty_name": "Named Entity Recognition", "content": "# Named-Entity Recognition \n\n\n\n\n## Introduction\n\nNamed-entity recognition (NER), also known as token classification or text tagging, is the task of taking a sentence and classifying every word (or \"token\") into different categories, such as names of people or names of locations, or different parts of speech. \n\nFor example, given the sentence:\n\n> Does Chicago have any Pakistani restaurants?\n\nA named-entity recognition algorithm may identify:\n\n* \"Chicago\" as a **location**\n* \"Pakistani\" as an **ethnicity** \n\n\nand so on. \n\nUsing `gradio` (specifically the `HighlightedText` component), you can easily build a web demo of your NER model and share that with the rest of your team.\n\nHere is an example of a demo that you'll be able to build:\n\n\n\nThis tutorial will show how to take a pretrained NER model and deploy it with a Gradio interface. We will show two different ways to use the `HighlightedText` component -- depending on your NER model, either of these two ways may be easier to learn! \n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). You will also need a pretrained named-entity recognition model. You can use your own, while in this tutorial, we will use one from the `transformers` library.\n\n### Approach 1: List of Entity Dictionaries\n\nMany named-entity recognition models output a list of dictionaries. Each dictionary consists of an *entity*, a \"start\" index, and an \"end\" index. This is, for example, how NER models in the `transformers` library operate:\n\n```py\nfrom transformers import pipeline \nner_pipeline = pipeline(\"ner\")\nner_pipeline(\"Does Chicago have any Pakistani restaurants\")\n```\n\nOutput:\n\n```bash\n[{'entity': 'I-LOC',\n 'score': 0.9988978,\n 'index': 2,\n 'word': 'Chicago',\n 'start': 5,\n 'end': 12},\n {'entity': 'I-MISC',\n 'score': 0.9958592,\n 'index': 5,\n 'word': 'Pakistani',\n 'start': 22,\n 'end': 31}]\n```\n\nIf you have such a model, it is very easy to hook it up to Gradio's `HighlightedText` component. 
All you need to do is pass in this **list of entities**, along with the **original text** to the model, together as dictionary, with the keys being `\"entities\"` and `\"text\"` respectively.\n\nHere is a complete example:\n\n```python\nfrom transformers import pipeline\n\nimport gradio as gr\n\nner_pipeline = pipeline(\"ner\")\n\nexamples = [\n \"Does Chicago have any stores and does Joe live here?\",\n]\n\ndef ner(text):\n output = ner_pipeline(text)\n return {\"text\": text, \"entities\": output} \n\ndemo = gr.Interface(ner,\n gr.Textbox(placeholder=\"Enter sentence here...\"), \n gr.HighlightedText(),\n examples=examples)\n\ndemo.launch()\n\n```\n\n\n### Approach 2: List of Tuples\n\nAn alternative way to pass data into the `HighlightedText` component is a list of tuples. The first element of each tuple should be the word or words that are being classified into a particular entity. The second element should be the entity label (or `None` if they should be unlabeled). The `HighlightedText` component automatically strings together the words and labels to display the entities.\n\nIn some cases, this can be easier than the first approach. Here is a demo showing this approach using Spacy's parts-of-speech tagger:\n\n```python\nimport gradio as gr\nimport os\nos.system('python -m spacy download en_core_web_sm')\nimport spacy\nfrom spacy import displacy\n\nnlp = spacy.load(\"en_core_web_sm\")\n\ndef text_analysis(text):\n doc = nlp(text)\n html = displacy.render(doc, style=\"dep\", page=True)\n html = (\n \"
\"\n + html\n + \"
\"\n )\n pos_count = {\n \"char_count\": len(text),\n \"token_count\": 0,\n }\n pos_tokens = []\n\n for token in doc:\n pos_tokens.extend([(token.text, token.pos_), (\" \", None)])\n\n return pos_tokens, pos_count, html\n\ndemo = gr.Interface(\n text_analysis,\n gr.Textbox(placeholder=\"Enter sentence here...\"),\n [\"highlight\", \"json\", \"html\"],\n examples=[\n [\"What a beautiful morning for a walk!\"],\n [\"It was the best of times, it was the worst of times.\"],\n ],\n)\n\ndemo.launch()\n\n```\n\n\n\n--------------------------------------------\n\n\nAnd you're done! That's all you need to know to build a web-based GUI for your NER model. \n\nFun tip: you can share your NER demo instantly with others simply by setting `share=True` in `launch()`. \n\n\n", "html": "

\n", "tags": ["NER", "TEXT", "HIGHLIGHT"], "spaces": ["https://huggingface.co/spaces/rajistics/biobert_ner_demo", "https://huggingface.co/spaces/abidlabs/ner", "https://huggingface.co/spaces/rajistics/Financial_Analyst_AI"], "url": "/guides/named-entity-recognition/", "contributor": null}], "preprocessing": "this component does *not* accept input.", "postprocessing": "expects a List[Tuple[str, float | str]]] consisting of spans of text and their associated labels, or a Dict with two keys: (1) \"text\" whose value is the complete text, and (2) \"entities\", which is a list of dictionaries, each of which have the keys: \"entity\" (consisting of the entity label, can alternatively be called \"entity_group\"), \"start\" (the character index where the label starts), and \"end\" (the character index where the label ends). Entities should not overlap.", "parent": "gradio", "prev_obj": "HTML", "next_obj": "Image"}, "image": {"class": null, "name": "Image", "description": "Creates an image component that can be used to upload/draw images (as an input) or display images (as an output).", "tags": {"preprocessing": "passes the uploaded image as a {numpy.array}, {PIL.Image} or {str} filepath depending on `type` -- unless `tool` is `sketch` AND source is one of `upload` or `webcam`. In these cases, a {dict} with keys `image` and `mask` is passed, and the format of the corresponding values depends on `type`.", "postprocessing": "expects a {numpy.array}, {PIL.Image} or {str} or {pathlib.Path} filepath to an image and displays the image.", "examples-format": "a {str} filepath to a local file that contains the image.", "demos": "image_mod, image_mod_default_image", "guides": "image-classification-in-pytorch, image-classification-in-tensorflow, image-classification-with-vision-transformers, building-a-pictionary_app, create-your-own-friends-with-a-gan"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "value", "annotation": "str | _Image.Image | np.ndarray | None", "doc": "A PIL Image, numpy array, path or URL for the default value that Image component is going to take. If callable, the function will be called whenever the app loads to set the initial value of the component.", "default": "None"}, {"name": "shape", "annotation": "tuple[int, int] | None", "doc": "(width, height) shape to crop and resize image when passed to function. If None, matches input image size. Pass None for either width or height to only crop and resize the other.", "default": "None"}, {"name": "height", "annotation": "int | None", "doc": "Height of the displayed image in pixels.", "default": "None"}, {"name": "width", "annotation": "int | None", "doc": "Width of the displayed image in pixels.", "default": "None"}, {"name": "image_mode", "annotation": "Literal['1', 'L', 'P', 'RGB', 'RGBA', 'CMYK', 'YCbCr', 'LAB', 'HSV', 'I', 'F']", "doc": "\"RGB\" if color, or \"L\" if black and white. See https://pillow.readthedocs.io/en/stable/handbook/concepts.html for other supported image modes and their meaning.", "default": "\"RGB\""}, {"name": "invert_colors", "annotation": "bool", "doc": "whether to invert the image as a preprocessing step.", "default": "False"}, {"name": "source", "annotation": "Literal['upload', 'webcam', 'canvas']", "doc": "Source of image. 
\"upload\" creates a box where user can drop an image file, \"webcam\" allows user to take snapshot from their webcam, \"canvas\" defaults to a white image that can be edited and drawn upon with tools.", "default": "\"upload\""}, {"name": "tool", "annotation": "Literal['editor', 'select', 'sketch', 'color-sketch'] | None", "doc": "Tools used for editing. \"editor\" allows a full screen editor (and is the default if source is \"upload\" or \"webcam\"), \"select\" provides a cropping and zoom tool, \"sketch\" allows you to create a binary sketch (and is the default if source=\"canvas\"), and \"color-sketch\" allows you to created a sketch in different colors. \"color-sketch\" can be used with source=\"upload\" or \"webcam\" to allow sketching on an image. \"sketch\" can also be used with \"upload\" or \"webcam\" to create a mask over an image and in that case both the image and mask are passed into the function as a dictionary with keys \"image\" and \"mask\" respectively.", "default": "None"}, {"name": "type", "annotation": "Literal['numpy', 'pil', 'filepath']", "doc": "The format the image is converted to before being passed into the prediction function. \"numpy\" converts the image to a numpy array with shape (height, width, 3) and values from 0 to 255, \"pil\" converts the image to a PIL image object, \"filepath\" passes a str path to a temporary file containing the image.", "default": "\"numpy\""}, {"name": "label", "annotation": "str | None", "doc": "component name in interface.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", "default": "None"}, {"name": "show_label", "annotation": "bool | None", "doc": "if True, will display label.", "default": "None"}, {"name": "show_download_button", "annotation": "bool", "doc": "If True, will display button to download image.", "default": "True"}, {"name": "container", "annotation": "bool", "doc": "If True, will place the component in a container - providing some extra padding around the border.", "default": "True"}, {"name": "scale", "annotation": "int | None", "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", "default": "None"}, {"name": "min_width", "annotation": "int", "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", "default": "160"}, {"name": "interactive", "annotation": "bool | None", "doc": "if True, will allow users to upload and edit an image; if False, can only be used to display images. If not provided, this is inferred based on whether the component is used as an input or output.", "default": "None"}, {"name": "visible", "annotation": "bool", "doc": "If False, component will be hidden.", "default": "True"}, {"name": "streaming", "annotation": "bool", "doc": "If True when used in a `live` interface, will automatically stream webcam feed. Only valid is source is 'webcam'.", "default": "False"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. 
Can be used for targeting CSS styles.", "default": "None"}, {"name": "elem_classes", "annotation": "list[str] | str | None", "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "mirror_webcam", "annotation": "bool", "doc": "If True webcam will be mirrored. Default is True.", "default": "True"}, {"name": "brush_radius", "annotation": "float | None", "doc": "Size of the brush for Sketch. Default is None which chooses a sensible default", "default": "None"}, {"name": "brush_color", "annotation": "str", "doc": "Color of the brush for Sketch as hex string. Default is \"#000000\".", "default": "\"#000000\""}, {"name": "mask_opacity", "annotation": "float", "doc": "Opacity of mask drawn on image, as a value between 0 and 1.", "default": "0.7"}, {"name": "show_share_button", "annotation": "bool | None", "doc": "If True, will show a share icon in the corner of the component that allows user to share outputs to Hugging Face Spaces Discussions. If False, icon does not appear. If set to None (default behavior), then the icon appears if this Gradio app is launched on Spaces, but not otherwise.", "default": "None"}], "returns": {"annotation": null}, "example": null, "fns": [{"fn": null, "name": "change", "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. 
If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Image"}, {"fn": null, "name": "edit", "description": "This listener is triggered when the user edits the component (e.g. image) using the built-in editor. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Image"}, {"fn": null, "name": "clear", "description": "This listener is triggered when the user clears the component (e.g. image or audio) using the X button for the component. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. 
If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Image"}, {"fn": null, "name": "stream", "description": "This listener is triggered when the user streams the component (e.g. a live webcam component). 
This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. 
Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Image"}, {"fn": null, "name": "upload", "description": "This listener is triggered when the user uploads a file into the component (e.g. when the user uploads a video into a video component). This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Image"}, {"fn": null, "name": "select", "description": "Event listener for when the user clicks on a pixel within the image. Uses event data gradio.SelectData to carry `index` to refer to the [x, y] coordinates of the clicked pixel. See EventData documentation on how to use this event data.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. 
The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Image"}], "string_shortcuts": [["Image", "image", "Uses default values"], ["Webcam", "webcam", "Uses source=\"webcam\", interactive=True"], ["Sketchpad", "sketchpad", "Uses image_mode=\"L\", source=\"canvas\", shape=(28, 28), invert_colors=True, interactive=True"], ["Paint", "paint", "Uses source=\"canvas\", tool=\"color-sketch\", interactive=True"], ["ImageMask", "imagemask", "Uses source=\"upload\", tool=\"sketch\", interactive=True"], ["ImagePaint", "imagepaint", "Uses source=\"upload\", tool=\"color-sketch\", interactive=True"], ["Pil", "pil", "Uses type=\"pil\""]], "demos": [["image_mod", "import gradio as gr\nimport os\n\n\ndef image_mod(image):\n return image.rotate(45)\n\n\ndemo = gr.Interface(\n image_mod,\n gr.Image(type=\"pil\"),\n \"image\",\n flagging_options=[\"blurry\", \"incorrect\", \"other\"],\n examples=[\n os.path.join(os.path.dirname(__file__), \"images/cheetah1.jpg\"),\n os.path.join(os.path.dirname(__file__), \"images/lion.jpg\"),\n os.path.join(os.path.dirname(__file__), \"images/logo.png\"),\n os.path.join(os.path.dirname(__file__), \"images/tower.jpg\"),\n ],\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n"], ["image_mod_default_image", "import gradio as gr\nimport os\n\n\ndef image_mod(image):\n return image.rotate(45)\n\n\ncheetah = os.path.join(os.path.dirname(__file__), \"images/cheetah1.jpg\")\n\ndemo = gr.Interface(image_mod, gr.Image(type=\"pil\", value=cheetah), \"image\",\n flagging_options=[\"blurry\", \"incorrect\", \"other\"], examples=[\n os.path.join(os.path.dirname(__file__), \"images/lion.jpg\"),\n os.path.join(os.path.dirname(__file__), \"images/logo.png\")\n ])\n\nif __name__ == \"__main__\":\n demo.launch()\n"]], "guides": [{"name": "image-classification-in-pytorch", "category": "integrating-other-frameworks", "pretty_category": "Integrating Other Frameworks", "guide_index": null, "absolute_index": 20, "pretty_name": "Image Classification In 
Pytorch", "content": "# Image Classification in PyTorch\n\n\n\n\n## Introduction\n\nImage classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from autonomous vehicles to medical imaging. \n\nSuch models are perfect to use with Gradio's *image* input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in Python, and it will look like this (try one of the examples!):\n\n\n\n\nLet's get started!\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). We will be using a pretrained image classification model, so you should also have `torch` installed.\n\n## Step 1 \u2014 Setting up the Image Classification Model\n\nFirst, we will need an image classification model. For this tutorial, we will use a pretrained Resnet-18 model, as it is easily downloadable from [PyTorch Hub](https://pytorch.org/hub/pytorch_vision_resnet/). You can use a different pretrained model or train your own. \n\n```python\nimport torch\n\nmodel = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()\n```\n\nBecause we will be using the model for inference, we have called the `.eval()` method.\n\n## Step 2 \u2014 Defining a `predict` function\n\nNext, we will need to define a function that takes in the *user input*, which in this case is an image, and returns the prediction. The prediction should be returned as a dictionary whose keys are class name and values are confidence probabilities. We will load the class names from this [text file](https://git.io/JJkYN).\n\nIn the case of our pretrained model, it will look like this:\n\n```python\nimport requests\nfrom PIL import Image\nfrom torchvision import transforms\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\ndef predict(inp):\n inp = transforms.ToTensor()(inp).unsqueeze(0)\n with torch.no_grad():\n prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)\n confidences = {labels[i]: float(prediction[i]) for i in range(1000)} \n return confidences\n```\n\nLet's break this down. The function takes one parameter:\n\n* `inp`: the input image as a `PIL` image\n\nThen, the function converts the image to a PIL Image and then eventually a PyTorch `tensor`, passes it through the model, and returns:\n\n* `confidences`: the predictions, as a dictionary whose keys are class labels and whose values are confidence probabilities\n\n## Step 3 \u2014 Creating a Gradio Interface\n\nNow that we have our predictive function set up, we can create a Gradio Interface around it. \n\nIn this case, the input component is a drag-and-drop image component. To create this input, we use `Image(type=\"pil\")` which creates the component and handles the preprocessing to convert that to a `PIL` image. \n\nThe output component will be a `Label`, which displays the top labels in a nice form. Since we don't want to show all 1,000 class labels, we will customize it to show only the top 3 images by constructing it as `Label(num_top_classes=3)`.\n\nFinally, we'll add one more parameter, the `examples`, which allows us to prepopulate our interfaces with a few predefined examples. 
The code for Gradio looks like this:\n\n```python\nimport gradio as gr\n\ngr.Interface(fn=predict, \n inputs=gr.Image(type=\"pil\"),\n outputs=gr.Label(num_top_classes=3),\n examples=[\"lion.jpg\", \"cheetah.jpg\"]).launch()\n```\n\nThis produces the following interface, which you can try right here in your browser (try uploading your own examples!):\n\n\n\n----------\n\nAnd you're done! That's all the code you need to build a web demo for an image classifier. If you'd like to share with others, try setting `share=True` when you `launch()` the Interface!\n\n", "html": "

\n", "tags": ["VISION", "RESNET", "PYTORCH"], "spaces": ["https://huggingface.co/spaces/abidlabs/pytorch-image-classifier", "https://huggingface.co/spaces/pytorch/ResNet", "https://huggingface.co/spaces/pytorch/ResNext", "https://huggingface.co/spaces/pytorch/SqueezeNet"], "url": "/guides/image-classification-in-pytorch/", "contributor": null}, {"name": "image-classification-in-tensorflow", "category": "integrating-other-frameworks", "pretty_category": "Integrating Other Frameworks", "guide_index": null, "absolute_index": 21, "pretty_name": "Image Classification In Tensorflow", "content": "# Image Classification in TensorFlow and Keras\n\n\n\n\n## Introduction\n\nImage classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from traffic control systems to satellite imaging. \n\nSuch models are perfect to use with Gradio's *image* input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in Python, and it will look like this (try one of the examples!):\n\n\n\n\nLet's get started!\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). We will be using a pretrained Keras image classification model, so you should also have `tensorflow` installed.\n\n## Step 1 \u2014 Setting up the Image Classification Model\n\nFirst, we will need an image classification model. For this tutorial, we will use a pretrained Mobile Net model, as it is easily downloadable from [Keras](https://keras.io/api/applications/mobilenet/). You can use a different pretrained model or train your own. \n\n```python\nimport tensorflow as tf\n\ninception_net = tf.keras.applications.MobileNetV2()\n```\n\nThis line automatically downloads the MobileNet model and weights using the Keras library. \n\n## Step 2 \u2014 Defining a `predict` function\n\nNext, we will need to define a function that takes in the *user input*, which in this case is an image, and returns the prediction. The prediction should be returned as a dictionary whose keys are class name and values are confidence probabilities. We will load the class names from this [text file](https://git.io/JJkYN).\n\nIn the case of our pretrained model, it will look like this:\n\n```python\nimport requests\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\ndef classify_image(inp):\n inp = inp.reshape((-1, 224, 224, 3))\n inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)\n prediction = inception_net.predict(inp).flatten()\n confidences = {labels[i]: float(prediction[i]) for i in range(1000)}\n return confidences\n```\n\nLet's break this down. The function takes one parameter:\n\n* `inp`: the input image as a `numpy` array\n\nThen, the function adds a batch dimension, passes it through the model, and returns:\n\n* `confidences`: the predictions, as a dictionary whose keys are class labels and whose values are confidence probabilities\n\n## Step 3 \u2014 Creating a Gradio Interface\n\nNow that we have our predictive function set up, we can create a Gradio Interface around it. \n\nIn this case, the input component is a drag-and-drop image component. 
To create this input, we can use the `\"gradio.inputs.Image\"` class, which creates the component and handles the preprocessing to convert that to a numpy array. We will instantiate the class with a parameter that automatically preprocesses the input image to be 224 pixels by 224 pixels, which is the size that MobileNet expects.\n\nThe output component will be a `\"label\"`, which displays the top labels in a nice form. Since we don't want to show all 1,000 class labels, we will customize it to show only the top 3 images.\n\nFinally, we'll add one more parameter, the `examples`, which allows us to prepopulate our interfaces with a few predefined examples. The code for Gradio looks like this:\n\n```python\nimport gradio as gr\n\ngr.Interface(fn=classify_image, \n inputs=gr.Image(shape=(224, 224)),\n outputs=gr.Label(num_top_classes=3),\n examples=[\"banana.jpg\", \"car.jpg\"]).launch()\n```\n\nThis produces the following interface, which you can try right here in your browser (try uploading your own examples!):\n\n\n\n----------\n\nAnd you're done! That's all the code you need to build a web demo for an image classifier. If you'd like to share with others, try setting `share=True` when you `launch()` the Interface!\n\n", "html": "

\n", "tags": ["VISION", "MOBILENET", "TENSORFLOW"], "spaces": ["https://huggingface.co/spaces/abidlabs/keras-image-classifier"], "url": "/guides/image-classification-in-tensorflow/", "contributor": null}, {"name": "image-classification-with-vision-transformers", "category": "integrating-other-frameworks", "pretty_category": "Integrating Other Frameworks", "guide_index": null, "absolute_index": 22, "pretty_name": "Image Classification With Vision Transformers", "content": "# Image Classification with Vision Transformers\n\n\n\n\n## Introduction\n\nImage classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from facial recognition to manufacturing quality control. \n\nState-of-the-art image classifiers are based on the *transformers* architectures, originally popularized for NLP tasks. Such architectures are typically called vision transformers (ViT). Such models are perfect to use with Gradio's *image* input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in a **single line of Python**, and it will look like this (try one of the examples!):\n\n\n\n\nLet's get started!\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started).\n\n## Step 1 \u2014 Choosing a Vision Image Classification Model\n\nFirst, we will need an image classification model. For this tutorial, we will use a model from the [Hugging Face Model Hub](https://huggingface.co/models?pipeline_tag=image-classification). The Hub contains thousands of models covering dozens of different machine learning tasks. \n\nExpand the Tasks category on the left sidebar and select \"Image Classification\" as our task of interest. You will then see all of the models on the Hub that are designed to classify images.\n\nAt the time of writing, the most popular one is `google/vit-base-patch16-224`, which has been trained on ImageNet images at a resolution of 224x224 pixels. We will use this model for our demo. \n\n## Step 2 \u2014 Loading the Vision Transformer Model with Gradio\n\nWhen using a model from the Hugging Face Hub, we do not need to define the input or output components for the demo. Similarly, we do not need to be concerned with the details of preprocessing or postprocessing. \nAll of these are automatically inferred from the model tags.\n\nBesides the import statement, it only takes a single line of Python to load and launch the demo. \n\nWe use the `gr.Interface.load()` method and pass in the path to the model including the `huggingface/` to designate that it is from the Hugging Face Hub.\n\n```python\nimport gradio as gr\n\ngr.Interface.load(\n \"huggingface/google/vit-base-patch16-224\",\n examples=[\"alligator.jpg\", \"laptop.jpg\"]).launch()\n```\n\nNotice that we have added one more parameter, the `examples`, which allows us to prepopulate our interfaces with a few predefined examples. \n\nThis produces the following interface, which you can try right here in your browser. When you input an image, it is automatically preprocessed and sent to the Hugging Face Hub API, where it is passed through the model and returned as a human-interpretable prediction. Try uploading your own image!\n\n\n\n----------\n\nAnd you're done! In one line of code, you have built a web demo for an image classifier. 
If you'd like to share with others, try setting `share=True` when you `launch()` the Interface!\n\n", "html": "

Image Classification with Vision Transformers

\n\n

Introduction

\n\n

Image classification is a central task in computer vision. Building better classifiers to determine what object is present in a picture is an active area of research, as it has applications stretching from facial recognition to manufacturing quality control.

\n\n

State-of-the-art image classifiers are based on the transformers architectures, originally popularized for NLP tasks. Such architectures are typically called vision transformers (ViT). Such models are perfect to use with Gradio's image input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in a single line of Python, and it will look like this (try one of the examples!):

\n\n\n\n

Let's get started!

\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed.

\n\n

Step 1 \u2014 Choosing a Vision Image Classification Model

\n\n

First, we will need an image classification model. For this tutorial, we will use a model from the Hugging Face Model Hub. The Hub contains thousands of models covering dozens of different machine learning tasks.

\n\n

Expand the Tasks category on the left sidebar and select \"Image Classification\" as our task of interest. You will then see all of the models on the Hub that are designed to classify images.

\n\n

At the time of writing, the most popular one is google/vit-base-patch16-224, which has been trained on ImageNet images at a resolution of 224x224 pixels. We will use this model for our demo.

\n\n

Step 2 \u2014 Loading the Vision Transformer Model with Gradio

\n\n

When using a model from the Hugging Face Hub, we do not need to define the input or output components for the demo. Similarly, we do not need to be concerned with the details of preprocessing or postprocessing. \nAll of these are automatically inferred from the model tags.

\n\n

Besides the import statement, it only takes a single line of Python to load and launch the demo.

\n\n

We use the gr.Interface.load() method and pass in the path to the model, including the huggingface/ prefix to designate that it is from the Hugging Face Hub.

\n\n
import gradio as gr\n\ngr.Interface.load(\n             \"huggingface/google/vit-base-patch16-224\",\n             examples=[\"alligator.jpg\", \"laptop.jpg\"]).launch()\n
\n\n

Notice that we have added one more parameter, the examples, which allows us to prepopulate our interfaces with a few predefined examples.

\n\n

This produces the following interface, which you can try right here in your browser. When you input an image, it is automatically preprocessed and sent to the Hugging Face Hub API, where it is passed through the model and returned as a human-interpretable prediction. Try uploading your own image!

\n\n\n\n
\n\n

And you're done! In one line of code, you have built a web demo for an image classifier. If you'd like to share with others, try setting share=True when you launch() the Interface!

\n", "tags": ["VISION", "TRANSFORMERS", "HUB"], "spaces": ["https://huggingface.co/spaces/abidlabs/vision-transformer"], "url": "/guides/image-classification-with-vision-transformers/", "contributor": null}, {"name": "create-your-own-friends-with-a-gan", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 34, "pretty_name": "Create Your Own Friends With A Gan", "content": "# Create Your Own Friends with a GAN\n\n\n\n\n\n\n\n## Introduction\n\nIt seems that cryptocurrencies, [NFTs](https://www.nytimes.com/interactive/2022/03/18/technology/nft-guide.html), and the web3 movement are all the rage these days! Digital assets are being listed on marketplaces for astounding amounts of money, and just about every celebrity is debuting their own NFT collection. While your crypto assets [may be taxable, such as in Canada](https://www.canada.ca/en/revenue-agency/programs/about-canada-revenue-agency-cra/compliance/digital-currency/cryptocurrency-guide.html), today we'll explore some fun and tax-free ways to generate your own assortment of procedurally generated [CryptoPunks](https://www.larvalabs.com/cryptopunks).\n\nGenerative Adversarial Networks, often known just as *GANs*, are a specific class of deep-learning models that are designed to learn from an input dataset to create (*generate!*) new material that is convincingly similar to elements of the original training set. Famously, the website [thispersondoesnotexist.com](https://thispersondoesnotexist.com/) went viral with lifelike, yet synthetic, images of people generated with a model called StyleGAN2. GANs have gained traction in the machine learning world, and are now being used to generate all sorts of images, text, and even [music](https://salu133445.github.io/musegan/)!\n\nToday we'll briefly look at the high-level intuition behind GANs, and then we'll build a small demo around a pre-trained GAN to see what all the fuss is about. Here's a peek at what we're going to be putting together:\n\n\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). To use the pretrained model, also install `torch` and `torchvision`.\n\n## GANs: a very brief introduction\n\nOriginally proposed in [Goodfellow et al. 2014](https://arxiv.org/abs/1406.2661), GANs are made up of neural networks which compete with the intention of outsmarting each other. One network, known as the *generator*, is responsible for generating images. The other network, the *discriminator*, receives an image at a time from the generator along with a **real** image from the training data set. The discriminator then has to guess: which image is the fake?\n\nThe generator is constantly training to create images which are trickier for the discriminator to identify, while the discriminator raises the bar for the generator every time it correctly detects a fake. As the networks engage in this competitive (*adversarial!*) relationship, the images that get generated improve to the point where they become indistinguishable to human eyes!\n\nFor a more in-depth look at GANs, you can take a look at [this excellent post on Analytics Vidhya](https://www.analyticsvidhya.com/blog/2021/06/a-detailed-explanation-of-gan-with-implementation-using-tensorflow-and-keras/) or this [PyTorch tutorial](https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html). 
For now, though, we'll dive into a demo!\n\n## Step 1 \u2014 Create the Generator model\n\nTo generate new images with a GAN, you only need the generator model. There are many different architectures that the generator could use, but for this demo we'll use a pretrained GAN generator model with the following architecture:\n\n```python\nfrom torch import nn\n\nclass Generator(nn.Module):\n # Refer to the link below for explanations about nc, nz, and ngf\n # https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html#inputs\n def __init__(self, nc=4, nz=100, ngf=64):\n super(Generator, self).__init__()\n self.network = nn.Sequential(\n nn.ConvTranspose2d(nz, ngf * 4, 3, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 0, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n nn.Tanh(),\n )\n\n def forward(self, input):\n output = self.network(input)\n return output\n```\n\nWe're taking the generator from [this repo by @teddykoker](https://github.com/teddykoker/cryptopunks-gan/blob/main/train.py#L90), where you can also see the original discriminator model structure.\n\nAfter instantiating the model, we'll load in the weights from the Hugging Face Hub, stored at [nateraw/cryptopunks-gan](https://huggingface.co/nateraw/cryptopunks-gan):\n\n```python\nfrom huggingface_hub import hf_hub_download\nimport torch\n\nmodel = Generator()\nweights_path = hf_hub_download('nateraw/cryptopunks-gan', 'generator.pth')\nmodel.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))) # Use 'cuda' if you have a GPU available\n```\n\n## Step 2 \u2014 Defining a `predict` function\n\nThe `predict` function is the key to making Gradio work! Whatever inputs we choose through the Gradio interface will get passed through our `predict` function, which should operate on the inputs and generate outputs that we can display with Gradio output components. For GANs it's common to pass random noise into our model as the input, so we'll generate a tensor of random numbers and pass that through the model. We can then use `torchvision`'s `save_image` function to save the output of the model as a `png` file, and return the file name:\n\n```python\nfrom torchvision.utils import save_image\n\ndef predict(seed):\n num_punks = 4\n torch.manual_seed(seed)\n z = torch.randn(num_punks, 100, 1, 1)\n punks = model(z)\n save_image(punks, \"punks.png\", normalize=True)\n return 'punks.png'\n```\n\nWe're giving our `predict` function a `seed` parameter, so that we can fix the random tensor generation with a seed. We'll then be able to reproduce punks if we want to see them again by passing in the same seed.\n\n*Note!* Our model needs an input tensor of dimensions 100x1x1 to do a single inference, or (BatchSize)x100x1x1 for generating a batch of images. In this demo we'll start by generating 4 punks at a time.\n\n## Step 3 \u2014 Creating a Gradio interface\n\nAt this point you can even run the code you have with `predict()`, and you'll find your freshly generated punks in your file system at `./punks.png`. To make a truly interactive demo, though, we'll build out a simple interface with Gradio. 
Our goals here are to:\n\n* Set a slider input so users can choose the \"seed\" value\n* Use an image component for our output to showcase the generated punks\n* Use our `predict()` to take the seed and generate the images\n\nWith `gr.Interface()`, we can define all of that with a single function call:\n\n```python\nimport gradio as gr\n\ngr.Interface(\n predict,\n inputs=[\n gr.Slider(0, 1000, label='Seed', default=42),\n ],\n outputs=\"image\",\n).launch()\n```\n\nLaunching the interface should present you with something like this:\n\n\n\n## Step 4 \u2014 Even more punks!\n\nGenerating 4 punks at a time is a good start, but maybe we'd like to control how many we want to make each time. Adding more inputs to our Gradio interface is as simple as adding another item to the `inputs` list that we pass to `gr.Interface`:\n\n```python\ngr.Interface(\n predict,\n inputs=[\n gr.Slider(0, 1000, label='Seed', default=42),\n gr.Slider(4, 64, label='Number of Punks', step=1, default=10), # Adding another slider!\n ],\n outputs=\"image\",\n).launch()\n```\n\nThe new input will be passed to our `predict()` function, so we have to make some changes to that function to accept a new parameter:\n\n```python\ndef predict(seed, num_punks):\n torch.manual_seed(seed)\n z = torch.randn(num_punks, 100, 1, 1)\n punks = model(z)\n save_image(punks, \"punks.png\", normalize=True)\n return 'punks.png'\n```\n\nWhen you relaunch your interface, you should see a second slider that'll let you control the number of punks!\n\n## Step 5 - Polishing it up\n\nYour Gradio app is pretty much good to go, but you can add a few extra things to really make it ready for the spotlight \u2728\n\nWe can add some examples that users can easily try out by adding this to the `gr.Interface`:\n\n```python\ngr.Interface(\n # ...\n # keep everything as it is, and then add\n examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],\n).launch(cache_examples=True) # cache_examples is optional\n```\n\nThe `examples` parameter takes a list of lists, where each item in the sublists is ordered in the same order that we've listed the `inputs`. So in our case, `[seed, num_punks]`. Give it a try!\n\nYou can also try adding a `title`, `description`, and `article` to the `gr.Interface`. 
Each of those parameters accepts a string, so try it out and see what happens \ud83d\udc40 `article` will also accept HTML, as [explored in a previous guide](/guides/key-features/#descriptive-content)!\n\nWhen you're all done, you may end up with something like this:\n\n\n\nFor reference, here is our full code:\n\n```python\nimport torch\nfrom torch import nn\nfrom huggingface_hub import hf_hub_download\nfrom torchvision.utils import save_image\nimport gradio as gr\n\nclass Generator(nn.Module):\n # Refer to the link below for explanations about nc, nz, and ngf\n # https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html#inputs\n def __init__(self, nc=4, nz=100, ngf=64):\n super(Generator, self).__init__()\n self.network = nn.Sequential(\n nn.ConvTranspose2d(nz, ngf * 4, 3, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 0, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n nn.Tanh(),\n )\n\n def forward(self, input):\n output = self.network(input)\n return output\n\nmodel = Generator()\nweights_path = hf_hub_download('nateraw/cryptopunks-gan', 'generator.pth')\nmodel.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))) # Use 'cuda' if you have a GPU available\n\ndef predict(seed, num_punks):\n torch.manual_seed(seed)\n z = torch.randn(num_punks, 100, 1, 1)\n punks = model(z)\n save_image(punks, \"punks.png\", normalize=True)\n return 'punks.png'\n\ngr.Interface(\n predict,\n inputs=[\n gr.Slider(0, 1000, label='Seed', default=42),\n gr.Slider(4, 64, label='Number of Punks', step=1, default=10),\n ],\n outputs=\"image\",\n examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],\n).launch(cache_examples=True)\n```\n----------\n\nCongratulations! You've built out your very own GAN-powered CryptoPunks generator, with a fancy Gradio interface that makes it easy for anyone to use. Now you can [scour the Hub for more GANs](https://huggingface.co/models?other=gan) (or train your own) and continue making even more awesome demos \ud83e\udd17", "html": "

Create Your Own Friends with a GAN

\n\n

Introduction

\n\n

It seems that cryptocurrencies, NFTs, and the web3 movement are all the rage these days! Digital assets are being listed on marketplaces for astounding amounts of money, and just about every celebrity is debuting their own NFT collection. While your crypto assets may be taxable, such as in Canada, today we'll explore some fun and tax-free ways to generate your own assortment of procedurally generated CryptoPunks.

\n\n

Generative Adversarial Networks, often known just as GANs, are a specific class of deep-learning models that are designed to learn from an input dataset to create (generate!) new material that is convincingly similar to elements of the original training set. Famously, the website thispersondoesnotexist.com went viral with lifelike, yet synthetic, images of people generated with a model called StyleGAN2. GANs have gained traction in the machine learning world, and are now being used to generate all sorts of images, text, and even music!

\n\n

Today we'll briefly look at the high-level intuition behind GANs, and then we'll build a small demo around a pre-trained GAN to see what all the fuss is about. Here's a peek at what we're going to be putting together:

\n\n\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed. To use the pretrained model, also install torch and torchvision.

\n\n

GANs: a very brief introduction

\n\n

Originally proposed in Goodfellow et al. 2014, GANs are made up of neural networks which compete with the intention of outsmarting each other. One network, known as the generator, is responsible for generating images. The other network, the discriminator, receives an image at a time from the generator along with a real image from the training data set. The discriminator then has to guess: which image is the fake?

\n\n

The generator is constantly training to create images which are trickier for the discriminator to identify, while the discriminator raises the bar for the generator every time it correctly detects a fake. As the networks engage in this competitive (adversarial!) relationship, the images that get generated improve to the point where they become indistinguishable to human eyes!

\n\n
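
To make this back-and-forth concrete, here is a rough, minimal sketch of a single training step using tiny stand-in fully-connected networks (not the CryptoPunks models themselves): the discriminator is nudged to score real images as 1 and fakes as 0, while the generator is nudged to make the discriminator score its fakes as 1.

\n\n
import torch\nfrom torch import nn\n\n# Stand-in networks, just to illustrate the adversarial objective\nlatent_dim, image_dim = 100, 24 * 24\ngenerator = nn.Sequential(nn.Linear(latent_dim, image_dim), nn.Tanh())\ndiscriminator = nn.Sequential(nn.Linear(image_dim, 1), nn.Sigmoid())\nopt_g = torch.optim.Adam(generator.parameters(), lr=2e-4)\nopt_d = torch.optim.Adam(discriminator.parameters(), lr=2e-4)\nbce = nn.BCELoss()\n\nreal_images = torch.rand(16, image_dim)   # stand-in for a batch of real training images\n\n# Discriminator step: push real images toward 1, generated fakes toward 0\nfake_images = generator(torch.randn(16, latent_dim)).detach()\nd_loss = bce(discriminator(real_images), torch.ones(16, 1)) + bce(discriminator(fake_images), torch.zeros(16, 1))\nopt_d.zero_grad(); d_loss.backward(); opt_d.step()\n\n# Generator step: try to make the discriminator label fakes as real\nfake_images = generator(torch.randn(16, latent_dim))\ng_loss = bce(discriminator(fake_images), torch.ones(16, 1))\nopt_g.zero_grad(); g_loss.backward(); opt_g.step()
\n\n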

For a more in-depth look at GANs, you can take a look at this excellent post on Analytics Vidhya or this PyTorch tutorial. For now, though, we'll dive into a demo!

\n\n

Step 1 \u2014 Create the Generator model

\n\n

To generate new images with a GAN, you only need the generator model. There are many different architectures that the generator could use, but for this demo we'll use a pretrained GAN generator model with the following architecture:

\n\n
from torch import nn\n\nclass Generator(nn.Module):\n    # Refer to the link below for explanations about nc, nz, and ngf\n    # https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html#inputs\n    def __init__(self, nc=4, nz=100, ngf=64):\n        super(Generator, self).__init__()\n        self.network = nn.Sequential(\n            nn.ConvTranspose2d(nz, ngf * 4, 3, 1, 0, bias=False),\n            nn.BatchNorm2d(ngf * 4),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False),\n            nn.BatchNorm2d(ngf * 2),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 0, bias=False),\n            nn.BatchNorm2d(ngf),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n            nn.Tanh(),\n        )\n\n    def forward(self, input):\n        output = self.network(input)\n        return output\n
\n\n

We're taking the generator from this repo by @teddykoker, where you can also see the original discriminator model structure.

\n\n

After instantiating the model, we'll load in the weights from the Hugging Face Hub, stored at nateraw/cryptopunks-gan:

\n\n
from huggingface_hub import hf_hub_download\nimport torch\n\nmodel = Generator()\nweights_path = hf_hub_download('nateraw/cryptopunks-gan', 'generator.pth')\nmodel.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))) # Use 'cuda' if you have a GPU available\n
\n\n

Step 2 \u2014 Defining a predict function

\n\n

The predict function is the key to making Gradio work! Whatever inputs we choose through the Gradio interface will get passed through our predict function, which should operate on the inputs and generate outputs that we can display with Gradio output components. For GANs it's common to pass random noise into our model as the input, so we'll generate a tensor of random numbers and pass that through the model. We can then use torchvision's save_image function to save the output of the model as a png file, and return the file name:

\n\n
from torchvision.utils import save_image\n\ndef predict(seed):\n    num_punks = 4\n    torch.manual_seed(seed)\n    z = torch.randn(num_punks, 100, 1, 1)\n    punks = model(z)\n    save_image(punks, \"punks.png\", normalize=True)\n    return 'punks.png'\n
\n\n

We're giving our predict function a seed parameter, so that we can fix the random tensor generation with a seed. We'll then be able to reproduce punks if we want to see them again by passing in the same seed.

\n\n

Note! Our model needs an input tensor of dimensions 100x1x1 to do a single inference, or (BatchSize)x100x1x1 for generating a batch of images. In this demo we'll start by generating 4 punks at a time.

\n\n
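
As a quick sanity check, you can pass noise tensors of either shape through the generator loaded above and inspect the output shapes (the sizes shown in the comments follow from this particular architecture):

\n\n
z_single = torch.randn(1, 100, 1, 1)   # noise for a single punk\nz_batch = torch.randn(8, 100, 1, 1)    # noise for a batch of 8 punks\nprint(model(z_single).shape)   # e.g. torch.Size([1, 4, 24, 24]) for this generator\nprint(model(z_batch).shape)    # e.g. torch.Size([8, 4, 24, 24])
\n\n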

Step 3 \u2014 Creating a Gradio interface

\n\n

At this point you can even run the code you have with predict(<SOME_NUMBER>), and you'll find your freshly generated punks in your file system at ./punks.png. To make a truly interactive demo, though, we'll build out a simple interface with Gradio. Our goals here are to:

\n\n
    \n
  • Set a slider input so users can choose the \"seed\" value
  • \n
  • Use an image component for our output to showcase the generated punks
  • \n
  • Use our predict() to take the seed and generate the images
  • \n
\n\n

With gr.Interface(), we can define all of that with a single function call:

\n\n
import gradio as gr\n\ngr.Interface(\n    predict,\n    inputs=[\n        gr.Slider(0, 1000, label='Seed', default=42),\n    ],\n    outputs=\"image\",\n).launch()\n
\n\n

Launching the interface should present you with something like this:

\n\n\n\n

Step 4 \u2014 Even more punks!

\n\n

Generating 4 punks at a time is a good start, but maybe we'd like to control how many we want to make each time. Adding more inputs to our Gradio interface is as simple as adding another item to the inputs list that we pass to gr.Interface:

\n\n
gr.Interface(\n    predict,\n    inputs=[\n        gr.Slider(0, 1000, label='Seed', default=42),\n        gr.Slider(4, 64, label='Number of Punks', step=1, default=10), # Adding another slider!\n    ],\n    outputs=\"image\",\n).launch()\n
\n\n

The new input will be passed to our predict() function, so we have to make some changes to that function to accept a new parameter:

\n\n
def predict(seed, num_punks):\n    torch.manual_seed(seed)\n    z = torch.randn(num_punks, 100, 1, 1)\n    punks = model(z)\n    save_image(punks, \"punks.png\", normalize=True)\n    return 'punks.png'\n
\n\n

When you relaunch your interface, you should see a second slider that'll let you control the number of punks!

\n\n

Step 5 - Polishing it up

\n\n

Your Gradio app is pretty much good to go, but you can add a few extra things to really make it ready for the spotlight \u2728

\n\n

We can add some examples that users can easily try out by adding this to the gr.Interface:

\n\n
gr.Interface(\n    # ...\n    # keep everything as it is, and then add\n    examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],\n).launch(cache_examples=True) # cache_examples is optional\n
\n\n

The examples parameter takes a list of lists, where each item in the sublists is ordered in the same order that we've listed the inputs. So in our case, [seed, num_punks]. Give it a try!

\n\n

You can also try adding a title, description, and article to the gr.Interface. Each of those parameters accepts a string, so try it out and see what happens \ud83d\udc40 article will also accept HTML, as explored in a previous guide!

\n\n
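
For illustration, these parameters slot straight into the same gr.Interface call (the title, description, and article strings below are just placeholders):

\n\n
gr.Interface(\n    predict,\n    inputs=[\n        gr.Slider(0, 1000, label='Seed', default=42),\n        gr.Slider(4, 64, label='Number of Punks', step=1, default=10),\n    ],\n    outputs=\"image\",\n    examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],\n    title='CryptoPunks GAN',   # shown at the top of the demo\n    description='Generate your own punks with a pretrained GAN.',\n    article='Weights come from nateraw/cryptopunks-gan on the Hugging Face Hub.',   # also accepts HTML\n).launch(cache_examples=True)
\n\n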

When you're all done, you may end up with something like this:

\n\n\n\n

For reference, here is our full code:

\n\n
import torch\nfrom torch import nn\nfrom huggingface_hub import hf_hub_download\nfrom torchvision.utils import save_image\nimport gradio as gr\n\nclass Generator(nn.Module):\n    # Refer to the link below for explanations about nc, nz, and ngf\n    # https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html#inputs\n    def __init__(self, nc=4, nz=100, ngf=64):\n        super(Generator, self).__init__()\n        self.network = nn.Sequential(\n            nn.ConvTranspose2d(nz, ngf * 4, 3, 1, 0, bias=False),\n            nn.BatchNorm2d(ngf * 4),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False),\n            nn.BatchNorm2d(ngf * 2),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 0, bias=False),\n            nn.BatchNorm2d(ngf),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n            nn.Tanh(),\n        )\n\n    def forward(self, input):\n        output = self.network(input)\n        return output\n\nmodel = Generator()\nweights_path = hf_hub_download('nateraw/cryptopunks-gan', 'generator.pth')\nmodel.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))) # Use 'cuda' if you have a GPU available\n\ndef predict(seed, num_punks):\n    torch.manual_seed(seed)\n    z = torch.randn(num_punks, 100, 1, 1)\n    punks = model(z)\n    save_image(punks, \"punks.png\", normalize=True)\n    return 'punks.png'\n\ngr.Interface(\n    predict,\n    inputs=[\n        gr.Slider(0, 1000, label='Seed', default=42),\n        gr.Slider(4, 64, label='Number of Punks', step=1, default=10),\n    ],\n    outputs=\"image\",\n    examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],\n).launch(cache_examples=True)\n
\n\n
\n\n

Congratulations! You've built out your very own GAN-powered CryptoPunks generator, with a fancy Gradio interface that makes it easy for anyone to use. Now you can scour the Hub for more GANs (or train your own) and continue making even more awesome demos \ud83e\udd17

\n", "tags": ["GAN", "IMAGE", "HUB"], "spaces": ["https://huggingface.co/spaces/NimaBoscarino/cryptopunks", "https://huggingface.co/spaces/nateraw/cryptopunks-generator"], "url": "/guides/create-your-own-friends-with-a-gan/", "contributor": "Nima Boscarino and Nate Raw"}], "preprocessing": "passes the uploaded image as a numpy.array, PIL.Image or str filepath depending on `type` -- unless `tool` is `sketch` AND source is one of `upload` or `webcam`. In these cases, a dict with keys `image` and `mask` is passed, and the format of the corresponding values depends on `type`.", "postprocessing": "expects a numpy.array, PIL.Image or str or pathlib.Path filepath to an image and displays the image.", "examples-format": "a str filepath to a local file that contains the image.", "parent": "gradio", "prev_obj": "HighlightedText", "next_obj": "Interpretation"}, "interpretation": {"class": null, "name": "Interpretation", "description": "Used to create an interpretation widget for a component.
", "tags": {"preprocessing": "this component does *not* accept input.", "postprocessing": "expects a {dict} with keys \"original\" and \"interpretation\".", "guides": "custom-interpretations-with-blocks"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "component", "annotation": "Component", "doc": "Which component to show in the interpretation widget."}, {"name": "visible", "annotation": "bool", "doc": "Whether or not the interpretation is visible.", "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "elem_classes", "annotation": "list[str] | str | None", "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}], "returns": {"annotation": null}, "example": null, "fns": [], "string_shortcuts": [["Interpretation", "interpretation", "Uses default values"]], "guides": [{"name": "custom-interpretations-with-blocks", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 36, "pretty_name": "Custom Interpretations With Blocks", "content": "# Custom Machine Learning Interpretations with Blocks\n\n\n**Prerequisite**: This Guide requires you to know about Blocks and the interpretation feature of Interfaces.\nMake sure to [read the Guide to Blocks first](https://gradio.app/guides/quickstart/#blocks-more-flexibility-and-control) as well as the\ninterpretation section of the [Advanced Interface Features Guide](/advanced-interface-features#interpreting-your-predictions).\n\n## Introduction\n\nIf you have experience working with the Interface class, then you know that interpreting the prediction of your machine learning model\nis as easy as setting the `interpretation` parameter to either \"default\" or \"shap\".\n\nYou may be wondering if it is possible to add the same interpretation functionality to an app built with the Blocks API.\nNot only is it possible, but the flexibility of Blocks lets you display the interpretation output in ways that are\nimpossible to do with Interfaces!\n\nThis guide will show how to:\n\n1. Recreate the behavior of Interfaces's interpretation feature in a Blocks app.\n2. 
Customize how interpretations are displayed in a Blocks app.\n\nLet's get started!\n\n## Setting up the Blocks app\n\nLet's build a sentiment classification app with the Blocks API.\nThis app will take text as input and output the probability that this text expresses either negative or positive sentiment.\nWe'll have a single input `Textbox` and a single output `Label` component.\nBelow is the code for the app as well as the app itself.\n\n```python\nimport gradio as gr \nfrom transformers import pipeline\n\nsentiment_classifier = pipeline(\"text-classification\", return_all_scores=True)\n\ndef classifier(text):\n pred = sentiment_classifier(text)\n return {p[\"label\"]: p[\"score\"] for p in pred[0]}\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n input_text = gr.Textbox(label=\"Input Text\")\n with gr.Row():\n classify = gr.Button(\"Classify Sentiment\")\n with gr.Column():\n label = gr.Label(label=\"Predicted Sentiment\")\n\n classify.click(classifier, input_text, label)\ndemo.launch()\n```\n\n \n\n## Adding interpretations to the app\n\nOur goal is to present to our users how the words in the input contribute to the model's prediction.\nThis will help our users understand how the model works and also evaluate its effectiveness.\nFor example, we should expect our model to identify the words \"happy\" and \"love\" with positive sentiment - if not it's a sign we made a mistake in training it!\n\nFor each word in the input, we will compute a score of how much the model's prediction of positive sentiment is changed by that word.\nOnce we have those `(word, score)` pairs we can use gradio to visualize them for the user.\n\nThe [shap](https://shap.readthedocs.io/en/stable/index.html) library will help us compute the `(word, score)` pairs and\ngradio will take care of displaying the output to the user.\n\nThe following code computes the `(word, score)` pairs:\n\n```python\ndef interpretation_function(text):\n explainer = shap.Explainer(sentiment_classifier)\n shap_values = explainer([text])\n \n # Dimensions are (batch size, text size, number of classes)\n # Since we care about positive sentiment, use index 1\n scores = list(zip(shap_values.data[0], shap_values.values[0, :, 1]))\n # Scores contains (word, score) pairs\n \n \n # Format expected by gr.components.Interpretation\n return {\"original\": text, \"interpretation\": scores}\n```\n\nNow, all we have to do is add a button that runs this function when clicked.\nTo display the interpretation, we will use `gr.components.Interpretation`.\nThis will color each word in the input either red or blue.\nRed if it contributes to positive sentiment and blue if it contributes to negative sentiment.\nThis is how `Interface` displays the interpretation output for text.\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n input_text = gr.Textbox(label=\"Input Text\")\n with gr.Row():\n classify = gr.Button(\"Classify Sentiment\")\n interpret = gr.Button(\"Interpret\")\n with gr.Column():\n label = gr.Label(label=\"Predicted Sentiment\")\n with gr.Column():\n interpretation = gr.components.Interpretation(input_text)\n classify.click(classifier, input_text, label)\n interpret.click(interpretation_function, input_text, interpretation)\n\ndemo.launch()\n```\n\n \n\n\n## Customizing how the interpretation is displayed\n\nThe `gr.components.Interpretation` component does a good job of showing how individual words contribute to the sentiment prediction,\nbut what if we also wanted to display the score themselves 
along with the words?\n\nOne way to do this would be to generate a bar plot where the words are on the horizontal axis and the bar height corresponds\nto the shap score.\n\nWe can do this by modifying our `interpretation_function` to additionally return a matplotlib bar plot.\nWe will display it with the `gr.Plot` component in a separate tab.\n\nThis is how the interpretation function will look:\n```python\ndef interpretation_function(text):\n explainer = shap.Explainer(sentiment_classifier)\n shap_values = explainer([text])\n # Dimensions are (batch size, text size, number of classes)\n # Since we care about positive sentiment, use index 1\n scores = list(zip(shap_values.data[0], shap_values.values[0, :, 1]))\n\n scores_desc = sorted(scores, key=lambda t: t[1])[::-1]\n\n # Filter out empty string added by shap\n scores_desc = [t for t in scores_desc if t[0] != \"\"]\n\n fig_m = plt.figure()\n \n # Select top 5 words that contribute to positive sentiment\n plt.bar(x=[s[0] for s in scores_desc[:5]],\n height=[s[1] for s in scores_desc[:5]])\n plt.title(\"Top words contributing to positive sentiment\")\n plt.ylabel(\"Shap Value\")\n plt.xlabel(\"Word\")\n return {\"original\": text, \"interpretation\": scores}, fig_m\n```\n\nAnd this is how the app code will look:\n```python\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n input_text = gr.Textbox(label=\"Input Text\")\n with gr.Row():\n classify = gr.Button(\"Classify Sentiment\")\n interpret = gr.Button(\"Interpret\")\n with gr.Column():\n label = gr.Label(label=\"Predicted Sentiment\")\n with gr.Column():\n with gr.Tabs():\n with gr.TabItem(\"Display interpretation with built-in component\"):\n interpretation = gr.components.Interpretation(input_text)\n with gr.TabItem(\"Display interpretation with plot\"):\n interpretation_plot = gr.Plot()\n\n classify.click(classifier, input_text, label)\n interpret.click(interpretation_function, input_text, [interpretation, interpretation_plot])\n\ndemo.launch()\n```\n\nYou can see the demo below!\n\n \n\n## Beyond Sentiment Classification\nAlthough we have focused on sentiment classification so far, you can add interpretations to almost any machine learning model.\nThe output must be an `gr.Image` or `gr.Label` but the input can be almost anything (`gr.Number`, `gr.Slider`, `gr.Radio`, `gr.Image`).\n\nHere is a demo built with blocks of interpretations for an image classification model:\n\n \n\n\n## Closing remarks\n\nWe did a deep dive \ud83e\udd3f into how interpretations work and how you can add them to your Blocks app.\n\nWe also showed how the Blocks API gives you the power to control how the interpretation is visualized in your app.\n\nAdding interpretations is a helpful way to make your users understand and gain trust in your model.\nNow you have all the tools you need to add them to all of your apps!\n", "html": "

Custom Machine Learning Interpretations with Blocks

\n\n

Prerequisite: This Guide requires you to know about Blocks and the interpretation feature of Interfaces.\nMake sure to read the Guide to Blocks first as well as the\ninterpretation section of the Advanced Interface Features Guide.

\n\n

Introduction

\n\n

If you have experience working with the Interface class, then you know that interpreting the prediction of your machine learning model\nis as easy as setting the interpretation parameter to either \"default\" or \"shap\".

\n\n

You may be wondering if it is possible to add the same interpretation functionality to an app built with the Blocks API.\nNot only is it possible, but the flexibility of Blocks lets you display the interpretation output in ways that are\nimpossible to do with Interfaces!

\n\n

This guide will show how to:

\n\n
    \n
  1. Recreate the behavior of the Interface's interpretation feature in a Blocks app.
  2. \n
  3. Customize how interpretations are displayed in a Blocks app.
  4. \n
\n\n

Let's get started!

\n\n

Setting up the Blocks app

\n\n

Let's build a sentiment classification app with the Blocks API.\nThis app will take text as input and output the probability that this text expresses either negative or positive sentiment.\nWe'll have a single input Textbox and a single output Label component.\nBelow is the code for the app as well as the app itself.

\n\n
import gradio as gr \nfrom transformers import pipeline\n\nsentiment_classifier = pipeline(\"text-classification\", return_all_scores=True)\n\ndef classifier(text):\n    pred = sentiment_classifier(text)\n    return {p[\"label\"]: p[\"score\"] for p in pred[0]}\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        with gr.Column():\n            input_text = gr.Textbox(label=\"Input Text\")\n            with gr.Row():\n                classify = gr.Button(\"Classify Sentiment\")\n        with gr.Column():\n            label = gr.Label(label=\"Predicted Sentiment\")\n\n    classify.click(classifier, input_text, label)\ndemo.launch()\n
\n\n

\n\n

Adding interpretations to the app

\n\n

Our goal is to present to our users how the words in the input contribute to the model's prediction.\nThis will help our users understand how the model works and also evaluate its effectiveness.\nFor example, we should expect our model to associate the words \"happy\" and \"love\" with positive sentiment - if not, it's a sign we made a mistake in training it!

\n\n

For each word in the input, we will compute a score of how much the model's prediction of positive sentiment is changed by that word.\nOnce we have those (word, score) pairs we can use gradio to visualize them for the user.

\n\n

The shap library will help us compute the (word, score) pairs and\ngradio will take care of displaying the output to the user.

\n\n

The following code computes the (word, score) pairs:

\n\n
def interpretation_function(text):\n    explainer = shap.Explainer(sentiment_classifier)\n    shap_values = explainer([text])\n\n    # Dimensions are (batch size, text size, number of classes)\n    # Since we care about positive sentiment, use index 1\n    scores = list(zip(shap_values.data[0], shap_values.values[0, :, 1]))\n    # Scores contains (word, score) pairs\n\n\n    # Format expected by gr.components.Interpretation\n    return {\"original\": text, \"interpretation\": scores}\n
\n\n

Now, all we have to do is add a button that runs this function when clicked.\nTo display the interpretation, we will use gr.components.Interpretation.\nThis will color each word in the input either red or blue.\nRed if it contributes to positive sentiment and blue if it contributes to negative sentiment.\nThis is how Interface displays the interpretation output for text.

\n\n
with gr.Blocks() as demo:\n    with gr.Row():\n        with gr.Column():\n            input_text = gr.Textbox(label=\"Input Text\")\n            with gr.Row():\n                classify = gr.Button(\"Classify Sentiment\")\n                interpret = gr.Button(\"Interpret\")\n        with gr.Column():\n            label = gr.Label(label=\"Predicted Sentiment\")\n        with gr.Column():\n            interpretation = gr.components.Interpretation(input_text)\n    classify.click(classifier, input_text, label)\n    interpret.click(interpretation_function, input_text, interpretation)\n\ndemo.launch()\n
\n\n

\n\n

Customizing how the interpretation is displayed

\n\n

The gr.components.Interpretation component does a good job of showing how individual words contribute to the sentiment prediction,\nbut what if we also wanted to display the scores themselves along with the words?

\n\n

One way to do this would be to generate a bar plot where the words are on the horizontal axis and the bar height corresponds\nto the shap score.

\n\n

We can do this by modifying our interpretation_function to additionally return a matplotlib bar plot.\nWe will display it with the gr.Plot component in a separate tab.

\n\n

This is how the interpretation function will look:

\n\n
def interpretation_function(text):\n    explainer = shap.Explainer(sentiment_classifier)\n    shap_values = explainer([text])\n    # Dimensions are (batch size, text size, number of classes)\n    # Since we care about positive sentiment, use index 1\n    scores = list(zip(shap_values.data[0], shap_values.values[0, :, 1]))\n\n    scores_desc = sorted(scores, key=lambda t: t[1])[::-1]\n\n    # Filter out empty string added by shap\n    scores_desc = [t for t in scores_desc if t[0] != \"\"]\n\n    fig_m = plt.figure()\n\n    # Select top 5 words that contribute to positive sentiment\n    plt.bar(x=[s[0] for s in scores_desc[:5]],\n            height=[s[1] for s in scores_desc[:5]])\n    plt.title(\"Top words contributing to positive sentiment\")\n    plt.ylabel(\"Shap Value\")\n    plt.xlabel(\"Word\")\n    return {\"original\": text, \"interpretation\": scores}, fig_m\n
\n\n

And this is how the app code will look:

\n\n
with gr.Blocks() as demo:\n    with gr.Row():\n        with gr.Column():\n            input_text = gr.Textbox(label=\"Input Text\")\n            with gr.Row():\n                classify = gr.Button(\"Classify Sentiment\")\n                interpret = gr.Button(\"Interpret\")\n        with gr.Column():\n            label = gr.Label(label=\"Predicted Sentiment\")\n        with gr.Column():\n            with gr.Tabs():\n                with gr.TabItem(\"Display interpretation with built-in component\"):\n                    interpretation = gr.components.Interpretation(input_text)\n                with gr.TabItem(\"Display interpretation with plot\"):\n                    interpretation_plot = gr.Plot()\n\n    classify.click(classifier, input_text, label)\n    interpret.click(interpretation_function, input_text, [interpretation, interpretation_plot])\n\ndemo.launch()\n
\n\n

You can see the demo below!

\n\n

\n\n

Beyond Sentiment Classification

\n\n

Although we have focused on sentiment classification so far, you can add interpretations to almost any machine learning model.\nThe output must be a gr.Image or gr.Label, but the input can be almost anything (gr.Number, gr.Slider, gr.Radio, gr.Image).

\n\n

Here is a demo built with blocks of interpretations for an image classification model:

\n\n

\n\n

Closing remarks

\n\n

We did a deep dive \ud83e\udd3f into how interpretations work and how you can add them to your Blocks app.

\n\n

We also showed how the Blocks API gives you the power to control how the interpretation is visualized in your app.

\n\n

Adding interpretations is a great way to help your users understand and gain trust in your model.\nNow you have all the tools you need to add them to all of your apps!

\n", "tags": ["INTERPRETATION", "SENTIMENT ANALYSIS"], "spaces": [], "url": "/guides/custom-interpretations-with-blocks/", "contributor": null}], "preprocessing": "this component does *not* accept input.", "postprocessing": "expects a dict with keys \"original\" and \"interpretation\".", "parent": "gradio", "prev_obj": "Image", "next_obj": "JSON"}, "json": {"class": null, "name": "JSON", "description": "Used to display arbitrary JSON output prettily.
", "tags": {"preprocessing": "this component does *not* accept input.", "postprocessing": "expects a {str} filepath to a file containing valid JSON -- or a {list} or {dict} that is valid JSON", "demos": "zip_to_json, blocks_xray"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "value", "annotation": "str | dict | list | Callable | None", "doc": "Default value. If callable, the function will be called whenever the app loads to set the initial value of the component.", "default": "None"}, {"name": "label", "annotation": "str | None", "doc": "component name in interface.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", "default": "None"}, {"name": "show_label", "annotation": "bool | None", "doc": "if True, will display label.", "default": "None"}, {"name": "container", "annotation": "bool", "doc": "If True, will place the component in a container - providing some extra padding around the border.", "default": "True"}, {"name": "scale", "annotation": "int | None", "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", "default": "None"}, {"name": "min_width", "annotation": "int", "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", "default": "160"}, {"name": "visible", "annotation": "bool", "doc": "If False, component will be hidden.", "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "elem_classes", "annotation": "list[str] | str | None", "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}], "returns": {"annotation": null}, "example": null, "fns": [{"fn": null, "name": "change", "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. 
If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.JSON"}], "string_shortcuts": [["JSON", "json", "Uses default values"]], "demos": [["zip_to_json", "from zipfile import ZipFile\n\nimport gradio as gr\n\n\ndef zip_to_json(file_obj):\n files = []\n with ZipFile(file_obj.name) as zfile:\n for zinfo in zfile.infolist():\n files.append(\n {\n \"name\": zinfo.filename,\n \"file_size\": zinfo.file_size,\n \"compressed_size\": zinfo.compress_size,\n }\n )\n return files\n\n\ndemo = gr.Interface(zip_to_json, \"file\", \"json\")\n\nif __name__ == \"__main__\":\n demo.launch()\n"], ["blocks_xray", "import gradio as gr\nimport time\n\ndisease_values = [0.25, 0.5, 0.75]\n\ndef xray_model(diseases, img):\n return [{disease: disease_values[idx] for idx,disease in enumerate(diseases)}]\n\n\ndef ct_model(diseases, img):\n return [{disease: 0.1 for disease in diseases}]\n\nwith gr.Blocks() as demo:\n gr.Markdown(\n \"\"\"\n# Detect Disease From Scan\nWith this model you can lorem ipsum\n- ipsum 1\n- ipsum 2\n\"\"\"\n )\n gr.DuplicateButton()\n disease = gr.CheckboxGroup(\n info=\"Select the diseases you want to scan for.\",\n choices=[\"Covid\", \"Malaria\", \"Lung Cancer\"], label=\"Disease to Scan For\"\n )\n slider = gr.Slider(0, 100)\n\n with gr.Tab(\"X-ray\") as x_tab:\n with gr.Row():\n xray_scan = gr.Image()\n xray_results = gr.JSON()\n xray_run = gr.Button(\"Run\")\n xray_run.click(\n xray_model,\n inputs=[disease, xray_scan],\n outputs=xray_results,\n api_name=\"xray_model\"\n )\n\n with gr.Tab(\"CT Scan\"):\n with gr.Row():\n ct_scan = gr.Image()\n ct_results = gr.JSON()\n ct_run = gr.Button(\"Run\")\n ct_run.click(\n ct_model,\n inputs=[disease, ct_scan],\n outputs=ct_results,\n api_name=\"ct_model\"\n )\n\n upload_btn = gr.Button(\"Upload Results\", variant=\"primary\")\n upload_btn.click(\n lambda ct, xr: None,\n inputs=[ct_results, xray_results],\n outputs=[],\n )\n\nif __name__ == \"__main__\":\n demo.launch()\n"]], "preprocessing": "this component does *not* accept input.", "postprocessing": "expects a str filepath to a file containing valid JSON -- or a list or dict that is valid JSON", "parent": "gradio", "prev_obj": "Interpretation", "next_obj": "Label"}, "label": {"class": null, "name": "Label", "description": "Displays a classification label, along with confidence scores of top categories, if provided.
", "tags": {"preprocessing": "this component does *not* accept input.", "postprocessing": "expects a {Dict[str, float]} of classes and confidences, or {str} with just the class or an {int}/{float} for regression outputs, or a {str} path to a .json file containing a json dictionary in the structure produced by Label.postprocess().", "demos": "main_note, titanic_survival", "guides": "image-classification-in-pytorch, image-classification-in-tensorflow, image-classification-with-vision-transformers, building-a-pictionary-app"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "value", "annotation": "dict[str, float] | str | float | Callable | None", "doc": "Default value to show in the component. If a str or number is provided, simply displays the string or number. If a {Dict[str, float]} of classes and confidences is provided, displays the top class on top and the `num_top_classes` below, along with their confidence bars. If callable, the function will be called whenever the app loads to set the initial value of the component.", "default": "None"}, {"name": "num_top_classes", "annotation": "int | None", "doc": "number of most confident classes to show.", "default": "None"}, {"name": "label", "annotation": "str | None", "doc": "component name in interface.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", "default": "None"}, {"name": "show_label", "annotation": "bool | None", "doc": "if True, will display label.", "default": "None"}, {"name": "container", "annotation": "bool", "doc": "If True, will place the component in a container - providing some extra padding around the border.", "default": "True"}, {"name": "scale", "annotation": "int | None", "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", "default": "None"}, {"name": "min_width", "annotation": "int", "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", "default": "160"}, {"name": "visible", "annotation": "bool", "doc": "If False, component will be hidden.", "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "elem_classes", "annotation": "list[str] | str | None", "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "color", "annotation": "str | None", "doc": "The background color of the label (either a valid css color name or hexadecimal string).", "default": "None"}], "returns": {"annotation": null}, "example": null, "fns": [{"fn": null, "name": "change", "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). 
See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. 
Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Label"}, {"fn": null, "name": "select", "description": "Event listener for when the user selects a category from Label. Uses event data gradio.SelectData to carry `value` referring to name of selected category, and `index` to refer to index. See EventData documentation on how to use this event data.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Label"}], "string_shortcuts": [["Label", "label", "Uses default values"]], "demos": [["main_note", "from math import log2, pow\nimport os\n\nimport numpy as np\nfrom scipy.fftpack import fft\n\nimport gradio as gr\n\nA4 = 440\nC0 = A4 * pow(2, -4.75)\nname = [\"C\", \"C#\", \"D\", \"D#\", \"E\", \"F\", \"F#\", \"G\", \"G#\", \"A\", \"A#\", \"B\"]\n\n\ndef get_pitch(freq):\n h = round(12 * log2(freq / C0))\n n = h % 12\n return name[n]\n\n\ndef main_note(audio):\n rate, y = audio\n if len(y.shape) == 2:\n y = y.T[0]\n N = len(y)\n T = 1.0 / rate\n yf = fft(y)\n yf2 = 2.0 / N * np.abs(yf[0 : N // 2])\n xf = np.linspace(0.0, 1.0 / (2.0 * T), N // 2)\n\n volume_per_pitch = {}\n total_volume = np.sum(yf2)\n for freq, volume in zip(xf, yf2):\n if freq == 0:\n continue\n pitch = get_pitch(freq)\n if pitch not in volume_per_pitch:\n volume_per_pitch[pitch] = 0\n volume_per_pitch[pitch] += 1.0 * volume / total_volume\n volume_per_pitch = {k: float(v) for k, v in volume_per_pitch.items()}\n return volume_per_pitch\n\n\ndemo = gr.Interface(\n main_note,\n gr.Audio(source=\"microphone\"),\n gr.Label(num_top_classes=4),\n examples=[\n [os.path.join(os.path.dirname(__file__),\"audio/recording1.wav\")],\n [os.path.join(os.path.dirname(__file__),\"audio/cantina.wav\")],\n ],\n interpretation=\"default\",\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n"], ["titanic_survival", "import os\n\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\n\nimport gradio as gr\n\ncurrent_dir = os.path.dirname(os.path.realpath(__file__))\ndata = pd.read_csv(os.path.join(current_dir, \"files/titanic.csv\"))\n\n\ndef encode_age(df):\n df.Age = df.Age.fillna(-0.5)\n bins = (-1, 0, 5, 12, 18, 25, 35, 60, 120)\n categories = pd.cut(df.Age, bins, labels=False)\n df.Age = categories\n return df\n\n\ndef encode_fare(df):\n df.Fare = df.Fare.fillna(-0.5)\n bins = (-1, 0, 8, 15, 31, 1000)\n categories = pd.cut(df.Fare, bins, labels=False)\n df.Fare = categories\n return df\n\n\ndef encode_df(df):\n df = encode_age(df)\n df = encode_fare(df)\n sex_mapping = {\"male\": 0, \"female\": 1}\n df = df.replace({\"Sex\": sex_mapping})\n embark_mapping = {\"S\": 1, \"C\": 2, \"Q\": 3}\n df = df.replace({\"Embarked\": embark_mapping})\n df.Embarked = df.Embarked.fillna(0)\n df[\"Company\"] = 0\n df.loc[(df[\"SibSp\"] > 0), \"Company\"] = 1\n df.loc[(df[\"Parch\"] > 0), \"Company\"] = 2\n df.loc[(df[\"SibSp\"] > 0) & (df[\"Parch\"] > 0), 
\"Company\"] = 3\n df = df[\n [\n \"PassengerId\",\n \"Pclass\",\n \"Sex\",\n \"Age\",\n \"Fare\",\n \"Embarked\",\n \"Company\",\n \"Survived\",\n ]\n ]\n return df\n\n\ntrain = encode_df(data)\n\nX_all = train.drop([\"Survived\", \"PassengerId\"], axis=1)\ny_all = train[\"Survived\"]\n\nnum_test = 0.20\nX_train, X_test, y_train, y_test = train_test_split(\n X_all, y_all, test_size=num_test, random_state=23\n)\n\nclf = RandomForestClassifier()\nclf.fit(X_train, y_train)\npredictions = clf.predict(X_test)\n\n\ndef predict_survival(passenger_class, is_male, age, company, fare, embark_point):\n if passenger_class is None or embark_point is None:\n return None\n df = pd.DataFrame.from_dict(\n {\n \"Pclass\": [passenger_class + 1],\n \"Sex\": [0 if is_male else 1],\n \"Age\": [age],\n \"Fare\": [fare],\n \"Embarked\": [embark_point + 1],\n \"Company\": [\n (1 if \"Sibling\" in company else 0) + (2 if \"Child\" in company else 0)\n ]\n }\n )\n df = encode_age(df)\n df = encode_fare(df)\n pred = clf.predict_proba(df)[0]\n return {\"Perishes\": float(pred[0]), \"Survives\": float(pred[1])}\n\n\ndemo = gr.Interface(\n predict_survival,\n [\n gr.Dropdown([\"first\", \"second\", \"third\"], type=\"index\"),\n \"checkbox\",\n gr.Slider(0, 80, value=25),\n gr.CheckboxGroup([\"Sibling\", \"Child\"], label=\"Travelling with (select all)\"),\n gr.Number(value=20),\n gr.Radio([\"S\", \"C\", \"Q\"], type=\"index\"),\n ],\n \"label\",\n examples=[\n [\"first\", True, 30, [], 50, \"S\"],\n [\"second\", False, 40, [\"Sibling\", \"Child\"], 10, \"Q\"],\n [\"third\", True, 30, [\"Child\"], 20, \"S\"],\n ],\n interpretation=\"default\",\n live=True,\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n"]], "guides": [{"name": "image-classification-in-pytorch", "category": "integrating-other-frameworks", "pretty_category": "Integrating Other Frameworks", "guide_index": null, "absolute_index": 20, "pretty_name": "Image Classification In Pytorch", "content": "# Image Classification in PyTorch\n\n\n\n\n## Introduction\n\nImage classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from autonomous vehicles to medical imaging. \n\nSuch models are perfect to use with Gradio's *image* input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in Python, and it will look like this (try one of the examples!):\n\n\n\n\nLet's get started!\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). We will be using a pretrained image classification model, so you should also have `torch` installed.\n\n## Step 1 \u2014 Setting up the Image Classification Model\n\nFirst, we will need an image classification model. For this tutorial, we will use a pretrained Resnet-18 model, as it is easily downloadable from [PyTorch Hub](https://pytorch.org/hub/pytorch_vision_resnet/). You can use a different pretrained model or train your own. \n\n```python\nimport torch\n\nmodel = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()\n```\n\nBecause we will be using the model for inference, we have called the `.eval()` method.\n\n## Step 2 \u2014 Defining a `predict` function\n\nNext, we will need to define a function that takes in the *user input*, which in this case is an image, and returns the prediction. 
The prediction should be returned as a dictionary whose keys are class name and values are confidence probabilities. We will load the class names from this [text file](https://git.io/JJkYN).\n\nIn the case of our pretrained model, it will look like this:\n\n```python\nimport requests\nfrom PIL import Image\nfrom torchvision import transforms\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\ndef predict(inp):\n inp = transforms.ToTensor()(inp).unsqueeze(0)\n with torch.no_grad():\n prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)\n confidences = {labels[i]: float(prediction[i]) for i in range(1000)} \n return confidences\n```\n\nLet's break this down. The function takes one parameter:\n\n* `inp`: the input image as a `PIL` image\n\nThen, the function converts the image to a PIL Image and then eventually a PyTorch `tensor`, passes it through the model, and returns:\n\n* `confidences`: the predictions, as a dictionary whose keys are class labels and whose values are confidence probabilities\n\n## Step 3 \u2014 Creating a Gradio Interface\n\nNow that we have our predictive function set up, we can create a Gradio Interface around it. \n\nIn this case, the input component is a drag-and-drop image component. To create this input, we use `Image(type=\"pil\")` which creates the component and handles the preprocessing to convert that to a `PIL` image. \n\nThe output component will be a `Label`, which displays the top labels in a nice form. Since we don't want to show all 1,000 class labels, we will customize it to show only the top 3 images by constructing it as `Label(num_top_classes=3)`.\n\nFinally, we'll add one more parameter, the `examples`, which allows us to prepopulate our interfaces with a few predefined examples. The code for Gradio looks like this:\n\n```python\nimport gradio as gr\n\ngr.Interface(fn=predict, \n inputs=gr.Image(type=\"pil\"),\n outputs=gr.Label(num_top_classes=3),\n examples=[\"lion.jpg\", \"cheetah.jpg\"]).launch()\n```\n\nThis produces the following interface, which you can try right here in your browser (try uploading your own examples!):\n\n\n\n----------\n\nAnd you're done! That's all the code you need to build a web demo for an image classifier. If you'd like to share with others, try setting `share=True` when you `launch()` the Interface!\n\n", "html": "

[Image Classification in PyTorch: rendered HTML omitted; it duplicates the guide's markdown "content" field above]
\n", "tags": ["VISION", "RESNET", "PYTORCH"], "spaces": ["https://huggingface.co/spaces/abidlabs/pytorch-image-classifier", "https://huggingface.co/spaces/pytorch/ResNet", "https://huggingface.co/spaces/pytorch/ResNext", "https://huggingface.co/spaces/pytorch/SqueezeNet"], "url": "/guides/image-classification-in-pytorch/", "contributor": null}, {"name": "image-classification-in-tensorflow", "category": "integrating-other-frameworks", "pretty_category": "Integrating Other Frameworks", "guide_index": null, "absolute_index": 21, "pretty_name": "Image Classification In Tensorflow", "content": "# Image Classification in TensorFlow and Keras\n\n\n\n\n## Introduction\n\nImage classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from traffic control systems to satellite imaging. \n\nSuch models are perfect to use with Gradio's *image* input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in Python, and it will look like this (try one of the examples!):\n\n\n\n\nLet's get started!\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). We will be using a pretrained Keras image classification model, so you should also have `tensorflow` installed.\n\n## Step 1 \u2014 Setting up the Image Classification Model\n\nFirst, we will need an image classification model. For this tutorial, we will use a pretrained Mobile Net model, as it is easily downloadable from [Keras](https://keras.io/api/applications/mobilenet/). You can use a different pretrained model or train your own. \n\n```python\nimport tensorflow as tf\n\ninception_net = tf.keras.applications.MobileNetV2()\n```\n\nThis line automatically downloads the MobileNet model and weights using the Keras library. \n\n## Step 2 \u2014 Defining a `predict` function\n\nNext, we will need to define a function that takes in the *user input*, which in this case is an image, and returns the prediction. The prediction should be returned as a dictionary whose keys are class name and values are confidence probabilities. We will load the class names from this [text file](https://git.io/JJkYN).\n\nIn the case of our pretrained model, it will look like this:\n\n```python\nimport requests\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\ndef classify_image(inp):\n inp = inp.reshape((-1, 224, 224, 3))\n inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)\n prediction = inception_net.predict(inp).flatten()\n confidences = {labels[i]: float(prediction[i]) for i in range(1000)}\n return confidences\n```\n\nLet's break this down. The function takes one parameter:\n\n* `inp`: the input image as a `numpy` array\n\nThen, the function adds a batch dimension, passes it through the model, and returns:\n\n* `confidences`: the predictions, as a dictionary whose keys are class labels and whose values are confidence probabilities\n\n## Step 3 \u2014 Creating a Gradio Interface\n\nNow that we have our predictive function set up, we can create a Gradio Interface around it. \n\nIn this case, the input component is a drag-and-drop image component. 
To create this input, we can use the `\"gradio.inputs.Image\"` class, which creates the component and handles the preprocessing to convert that to a numpy array. We will instantiate the class with a parameter that automatically preprocesses the input image to be 224 pixels by 224 pixels, which is the size that MobileNet expects.\n\nThe output component will be a `\"label\"`, which displays the top labels in a nice form. Since we don't want to show all 1,000 class labels, we will customize it to show only the top 3 images.\n\nFinally, we'll add one more parameter, the `examples`, which allows us to prepopulate our interfaces with a few predefined examples. The code for Gradio looks like this:\n\n```python\nimport gradio as gr\n\ngr.Interface(fn=classify_image, \n inputs=gr.Image(shape=(224, 224)),\n outputs=gr.Label(num_top_classes=3),\n examples=[\"banana.jpg\", \"car.jpg\"]).launch()\n```\n\nThis produces the following interface, which you can try right here in your browser (try uploading your own examples!):\n\n\n\n----------\n\nAnd you're done! That's all the code you need to build a web demo for an image classifier. If you'd like to share with others, try setting `share=True` when you `launch()` the Interface!\n\n", "html": "

[Image Classification in TensorFlow and Keras: rendered HTML omitted; it duplicates the guide's markdown "content" field above]
\n", "tags": ["VISION", "MOBILENET", "TENSORFLOW"], "spaces": ["https://huggingface.co/spaces/abidlabs/keras-image-classifier"], "url": "/guides/image-classification-in-tensorflow/", "contributor": null}, {"name": "image-classification-with-vision-transformers", "category": "integrating-other-frameworks", "pretty_category": "Integrating Other Frameworks", "guide_index": null, "absolute_index": 22, "pretty_name": "Image Classification With Vision Transformers", "content": "# Image Classification with Vision Transformers\n\n\n\n\n## Introduction\n\nImage classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from facial recognition to manufacturing quality control. \n\nState-of-the-art image classifiers are based on the *transformers* architectures, originally popularized for NLP tasks. Such architectures are typically called vision transformers (ViT). Such models are perfect to use with Gradio's *image* input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in a **single line of Python**, and it will look like this (try one of the examples!):\n\n\n\n\nLet's get started!\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started).\n\n## Step 1 \u2014 Choosing a Vision Image Classification Model\n\nFirst, we will need an image classification model. For this tutorial, we will use a model from the [Hugging Face Model Hub](https://huggingface.co/models?pipeline_tag=image-classification). The Hub contains thousands of models covering dozens of different machine learning tasks. \n\nExpand the Tasks category on the left sidebar and select \"Image Classification\" as our task of interest. You will then see all of the models on the Hub that are designed to classify images.\n\nAt the time of writing, the most popular one is `google/vit-base-patch16-224`, which has been trained on ImageNet images at a resolution of 224x224 pixels. We will use this model for our demo. \n\n## Step 2 \u2014 Loading the Vision Transformer Model with Gradio\n\nWhen using a model from the Hugging Face Hub, we do not need to define the input or output components for the demo. Similarly, we do not need to be concerned with the details of preprocessing or postprocessing. \nAll of these are automatically inferred from the model tags.\n\nBesides the import statement, it only takes a single line of Python to load and launch the demo. \n\nWe use the `gr.Interface.load()` method and pass in the path to the model including the `huggingface/` to designate that it is from the Hugging Face Hub.\n\n```python\nimport gradio as gr\n\ngr.Interface.load(\n \"huggingface/google/vit-base-patch16-224\",\n examples=[\"alligator.jpg\", \"laptop.jpg\"]).launch()\n```\n\nNotice that we have added one more parameter, the `examples`, which allows us to prepopulate our interfaces with a few predefined examples. \n\nThis produces the following interface, which you can try right here in your browser. When you input an image, it is automatically preprocessed and sent to the Hugging Face Hub API, where it is passed through the model and returned as a human-interpretable prediction. Try uploading your own image!\n\n\n\n----------\n\nAnd you're done! In one line of code, you have built a web demo for an image classifier. 
If you'd like to share with others, try setting `share=True` when you `launch()` the Interface!\n\n", "html": "

[Image Classification with Vision Transformers: rendered HTML omitted; it duplicates the guide's markdown "content" field above]
\n", "tags": ["VISION", "TRANSFORMERS", "HUB"], "spaces": ["https://huggingface.co/spaces/abidlabs/vision-transformer"], "url": "/guides/image-classification-with-vision-transformers/", "contributor": null}, {"name": "building-a-pictionary-app", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 33, "pretty_name": "Building A Pictionary App", "content": "# Building a Pictionary App\n\n\n\n\n## Introduction\n\nHow well can an algorithm guess what you're drawing? A few years ago, Google released the **Quick Draw** dataset, which contains drawings made by humans of a variety of every objects. Researchers have used this dataset to train models to guess Pictionary-style drawings. \n\nSuch models are perfect to use with Gradio's *sketchpad* input, so in this tutorial we will build a Pictionary web application using Gradio. We will be able to build the whole web application in Python, and will look like this (try drawing something!):\n\n\n\nLet's get started! This guide covers how to build a pictionary app (step-by-step): \n\n1. [Set up the Sketch Recognition Model](#1-set-up-the-sketch-recognition-model)\n2. [Define a `predict` function](#2-define-a-predict-function)\n3. [Create a Gradio Interface](#3-create-a-gradio-interface)\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). To use the pretrained sketchpad model, also install `torch`.\n\n## 1. Set up the Sketch Recognition Model\n\nFirst, you will need a sketch recognition model. Since many researchers have already trained their own models on the Quick Draw dataset, we will use a pretrained model in this tutorial. Our model is a light 1.5 MB model trained by Nate Raw, that [you can download here](https://huggingface.co/spaces/nateraw/quickdraw/blob/main/pytorch_model.bin). \n\nIf you are interested, here [is the code](https://github.com/nateraw/quickdraw-pytorch) that was used to train the model. We will simply load the pretrained model in PyTorch, as follows:\n\n```python\nimport torch\nfrom torch import nn\n\nmodel = nn.Sequential(\n nn.Conv2d(1, 32, 3, padding='same'),\n nn.ReLU(),\n nn.MaxPool2d(2),\n nn.Conv2d(32, 64, 3, padding='same'),\n nn.ReLU(),\n nn.MaxPool2d(2),\n nn.Conv2d(64, 128, 3, padding='same'),\n nn.ReLU(),\n nn.MaxPool2d(2),\n nn.Flatten(),\n nn.Linear(1152, 256),\n nn.ReLU(),\n nn.Linear(256, len(LABELS)),\n)\nstate_dict = torch.load('pytorch_model.bin', map_location='cpu')\nmodel.load_state_dict(state_dict, strict=False)\nmodel.eval()\n```\n\n## 2. Define a `predict` function\n\nNext, you will need to define a function that takes in the *user input*, which in this case is a sketched image, and returns the prediction. The prediction should be returned as a dictionary whose keys are class name and values are confidence probabilities. We will load the class names from this [text file](https://huggingface.co/spaces/nateraw/quickdraw/blob/main/class_names.txt).\n\nIn the case of our pretrained model, it will look like this:\n\n```python\nfrom pathlib import Path\n\nLABELS = Path('class_names.txt').read_text().splitlines()\n\ndef predict(img):\n x = torch.tensor(img, dtype=torch.float32).unsqueeze(0).unsqueeze(0) / 255.\n with torch.no_grad():\n out = model(x)\n probabilities = torch.nn.functional.softmax(out[0], dim=0)\n values, indices = torch.topk(probabilities, 5)\n confidences = {LABELS[i]: v.item() for i, v in zip(indices, values)}\n return confidences\n```\n\nLet's break this down. 
The function takes one parameter:\n\n* `img`: the input image as a `numpy` array\n\nThen, the function converts the image to a PyTorch `tensor`, passes it through the model, and returns:\n\n* `confidences`: the top five predictions, as a dictionary whose keys are class labels and whose values are confidence probabilities\n\n## 3. Create a Gradio Interface\n\nNow that we have our predictive function set up, we can create a Gradio Interface around it. \n\nIn this case, the input component is a sketchpad. To create a sketchpad input, we can use the convenient string shortcut, `\"sketchpad\"`, which creates a canvas for a user to draw on and handles the preprocessing to convert that to a numpy array. \n\nThe output component will be a `\"label\"`, which displays the top labels in a nice form.\n\nFinally, we'll add one more parameter, setting `live=True`, which allows our interface to run in real time, adjusting its predictions every time a user draws on the sketchpad. The code for Gradio looks like this:\n\n```python\nimport gradio as gr\n\ngr.Interface(fn=predict, \n inputs=\"sketchpad\",\n outputs=\"label\",\n live=True).launch()\n```\n\nThis produces the following interface, which you can try right here in your browser (try drawing something, like a \"snake\" or a \"laptop\"):\n\n\n\n----------\n\nAnd you're done! That's all the code you need to build a Pictionary-style guessing app. Have fun and try to find some edge cases \ud83e\uddd0\n\n", "html": "

[Building a Pictionary App: rendered HTML omitted; it duplicates the guide's markdown "content" field above]
\n", "tags": ["SKETCHPAD", "LABELS", "LIVE"], "spaces": ["https://huggingface.co/spaces/nateraw/quickdraw"], "url": "/guides/building-a-pictionary-app/", "contributor": null}], "preprocessing": "this component does *not* accept input.", "postprocessing": "expects a Dict[str, float] of classes and confidences, or str with just the class or an int/float for regression outputs, or a str path to a .json file containing a json dictionary in the structure produced by Label.postprocess().", "parent": "gradio", "prev_obj": "JSON", "next_obj": "LinePlot"}, "lineplot": {"class": null, "name": "LinePlot", "description": "Create a line plot.

", "tags": {"preprocessing": "this component does *not* accept input.", "postprocessing": "expects a pandas dataframe with the data to plot.", "demos": "line_plot, live_dashboard"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "value", "annotation": "pd.DataFrame | Callable | None", "doc": "The pandas dataframe containing the data to display in a scatter plot.", "default": "None"}, {"name": "x", "annotation": "str | None", "doc": "Column corresponding to the x axis.", "default": "None"}, {"name": "y", "annotation": "str | None", "doc": "Column corresponding to the y axis.", "default": "None"}, {"name": "color", "annotation": "str | None", "doc": "The column to determine the point color. If the column contains numeric data, gradio will interpolate the column data so that small values correspond to light colors and large values correspond to dark values.", "default": "None"}, {"name": "stroke_dash", "annotation": "str | None", "doc": "The column to determine the symbol used to draw the line, e.g. dashed lines, dashed lines with points.", "default": "None"}, {"name": "overlay_point", "annotation": "bool | None", "doc": "Whether to draw a point on the line for each (x, y) coordinate pair.", "default": "None"}, {"name": "title", "annotation": "str | None", "doc": "The title to display on top of the chart.", "default": "None"}, {"name": "tooltip", "annotation": "list[str] | str | None", "doc": "The column (or list of columns) to display on the tooltip when a user hovers a point on the plot.", "default": "None"}, {"name": "x_title", "annotation": "str | None", "doc": "The title given to the x axis. By default, uses the value of the x parameter.", "default": "None"}, {"name": "y_title", "annotation": "str | None", "doc": "The title given to the y axis. By default, uses the value of the y parameter.", "default": "None"}, {"name": "color_legend_title", "annotation": "str | None", "doc": "The title given to the color legend. By default, uses the value of color parameter.", "default": "None"}, {"name": "stroke_dash_legend_title", "annotation": "str | None", "doc": "The title given to the stroke_dash legend. By default, uses the value of the stroke_dash parameter.", "default": "None"}, {"name": "color_legend_position", "annotation": "Literal['left', 'right', 'top', 'bottom', 'top-left', 'top-right', 'bottom-left', 'bottom-right', 'none'] | None", "doc": "The position of the color legend. If the string value 'none' is passed, this legend is omitted. For other valid position values see: https://vega.github.io/vega/docs/legends/#orientation.", "default": "None"}, {"name": "stroke_dash_legend_position", "annotation": "Literal['left', 'right', 'top', 'bottom', 'top-left', 'top-right', 'bottom-left', 'bottom-right', 'none'] | None", "doc": "The position of the stoke_dash legend. If the string value 'none' is passed, this legend is omitted. 
For other valid position values see: https://vega.github.io/vega/docs/legends/#orientation.", "default": "None"}, {"name": "height", "annotation": "int | None", "doc": "The height of the plot in pixels.", "default": "None"}, {"name": "width", "annotation": "int | None", "doc": "The width of the plot in pixels.", "default": "None"}, {"name": "x_lim", "annotation": "list[int] | None", "doc": "A tuple or list containing the limits for the x-axis, specified as [x_min, x_max].", "default": "None"}, {"name": "y_lim", "annotation": "list[int] | None", "doc": "A tuple of list containing the limits for the y-axis, specified as [y_min, y_max].", "default": "None"}, {"name": "caption", "annotation": "str | None", "doc": "The (optional) caption to display below the plot.", "default": "None"}, {"name": "interactive", "annotation": "bool | None", "doc": "Whether users should be able to interact with the plot by panning or zooming with their mouse or trackpad.", "default": "True"}, {"name": "label", "annotation": "str | None", "doc": "The (optional) label to display on the top left corner of the plot.", "default": "None"}, {"name": "show_label", "annotation": "bool | None", "doc": "Whether the label should be displayed.", "default": "None"}, {"name": "container", "annotation": "bool", "doc": null, "default": "True"}, {"name": "scale", "annotation": "int | None", "doc": null, "default": "None"}, {"name": "min_width", "annotation": "int", "doc": null, "default": "160"}, {"name": "every", "annotation": "float | None", "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", "default": "None"}, {"name": "visible", "annotation": "bool", "doc": "Whether the plot should be visible.", "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "elem_classes", "annotation": "list[str] | str | None", "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}], "returns": {"annotation": null}, "example": null, "fns": [{"fn": null, "name": "change", "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. 
If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.LinePlot"}, {"fn": null, "name": "clear", "description": "This listener is triggered when the user clears the component (e.g. image or audio) using the X button for the component. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. 
Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.LinePlot"}], "string_shortcuts": [["LinePlot", "lineplot", "Uses default values"]], "demos": [["line_plot", "import gradio as gr\nfrom vega_datasets import data\n\nstocks = data.stocks()\ngapminder = data.gapminder()\ngapminder = gapminder.loc[\n gapminder.country.isin([\"Argentina\", \"Australia\", \"Afghanistan\"])\n]\nclimate = data.climate()\nseattle_weather = data.seattle_weather()\n\n## Or generate your own fake data, here's an example for stocks:\n#\n# import pandas as pd\n# import random\n#\n# stocks = pd.DataFrame(\n# {\n# \"symbol\": [\n# random.choice(\n# [\n# \"MSFT\",\n# \"AAPL\",\n# \"AMZN\",\n# \"IBM\",\n# \"GOOG\",\n# ]\n# )\n# for _ in range(120)\n# ],\n# \"date\": [\n# pd.Timestamp(year=2000 + i, month=j, day=1)\n# for i in range(10)\n# for j in range(1, 13)\n# ],\n# \"price\": [random.randint(10, 200) for _ in range(120)],\n# }\n# )\n\n\ndef line_plot_fn(dataset):\n if dataset == \"stocks\":\n return gr.LinePlot.update(\n stocks,\n x=\"date\",\n y=\"price\",\n color=\"symbol\",\n color_legend_position=\"bottom\",\n title=\"Stock Prices\",\n tooltip=[\"date\", \"price\", \"symbol\"],\n height=300,\n width=500,\n )\n elif dataset == \"climate\":\n return gr.LinePlot.update(\n climate,\n x=\"DATE\",\n y=\"HLY-TEMP-NORMAL\",\n y_lim=[250, 500],\n title=\"Climate\",\n tooltip=[\"DATE\", \"HLY-TEMP-NORMAL\"],\n height=300,\n width=500,\n )\n elif dataset == \"seattle_weather\":\n return gr.LinePlot.update(\n seattle_weather,\n x=\"date\",\n y=\"temp_min\",\n tooltip=[\"weather\", \"date\"],\n overlay_point=True,\n title=\"Seattle Weather\",\n height=300,\n width=500,\n )\n elif dataset == \"gapminder\":\n return gr.LinePlot.update(\n gapminder,\n x=\"year\",\n y=\"life_expect\",\n color=\"country\",\n title=\"Life expectancy for countries\",\n stroke_dash=\"cluster\",\n x_lim=[1950, 2010],\n tooltip=[\"country\", \"life_expect\"],\n stroke_dash_legend_title=\"Country Cluster\",\n height=300,\n width=500,\n )\n\n\nwith gr.Blocks() as line_plot:\n with gr.Row():\n with gr.Column():\n dataset = gr.Dropdown(\n choices=[\"stocks\", \"climate\", \"seattle_weather\", \"gapminder\"],\n value=\"stocks\",\n )\n with gr.Column():\n plot = gr.LinePlot()\n dataset.change(line_plot_fn, inputs=dataset, outputs=plot)\n line_plot.load(fn=line_plot_fn, inputs=dataset, outputs=plot)\n\n\nif __name__ == \"__main__\":\n line_plot.launch()\n"], ["live_dashboard", "import math\n\nimport pandas as pd\n\nimport gradio as gr\nimport datetime\nimport numpy as np\n\n\ndef get_time():\n return datetime.datetime.now()\n\n\nplot_end = 2 * math.pi\n\n\ndef get_plot(period=1):\n global plot_end\n x = np.arange(plot_end - 2 * math.pi, plot_end, 0.02)\n y = np.sin(2 * math.pi * period * x)\n update = gr.LinePlot.update(\n value=pd.DataFrame({\"x\": x, \"y\": y}),\n x=\"x\",\n y=\"y\",\n title=\"Plot (updates every second)\",\n width=600,\n height=350,\n )\n plot_end += 2 * math.pi\n if plot_end > 1000:\n plot_end = 2 * math.pi\n return update\n\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n c_time2 = gr.Textbox(label=\"Current Time refreshed every second\")\n gr.Textbox(\n \"Change the value of the slider to automatically update the plot\",\n label=\"\",\n )\n period = gr.Slider(\n label=\"Period of plot\", value=1, minimum=0, maximum=10, step=1\n )\n plot = gr.LinePlot(show_label=False)\n with gr.Column():\n name = gr.Textbox(label=\"Enter your name\")\n greeting = 
gr.Textbox(label=\"Greeting\")\n button = gr.Button(value=\"Greet\")\n button.click(lambda s: f\"Hello {s}\", name, greeting)\n\n demo.load(lambda: datetime.datetime.now(), None, c_time2, every=1)\n dep = demo.load(get_plot, None, plot, every=1)\n period.change(get_plot, period, plot, every=1, cancels=[dep])\n\nif __name__ == \"__main__\":\n demo.queue().launch()\n"]], "preprocessing": "this component does *not* accept input.", "postprocessing": "expects a pandas dataframe with the data to plot.", "parent": "gradio", "prev_obj": "Label", "next_obj": "Markdown"}, "markdown": {"class": null, "name": "Markdown", "description": "Used to render arbitrary Markdown output. Can also render latex enclosed by dollar signs.
", "tags": {"preprocessing": "this component does *not* accept input.", "postprocessing": "expects a valid {str} that can be rendered as Markdown.", "demos": "blocks_hello, blocks_kinematics", "guides": "key-features"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "value", "annotation": "str | Callable", "doc": "Value to show in Markdown component. If callable, the function will be called whenever the app loads to set the initial value of the component.", "default": "\"\""}, {"name": "visible", "annotation": "bool", "doc": "If False, component will be hidden.", "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "elem_classes", "annotation": "list[str] | str | None", "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "rtl", "annotation": "bool", "doc": "If True, sets the direction of the rendered text to right-to-left. Default is False, which renders text left-to-right.", "default": "False"}], "returns": {"annotation": null}, "example": null, "fns": [{"fn": null, "name": "change", "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. 
If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Markdown"}], "string_shortcuts": [["Markdown", "markdown", "Uses default values"]], "demos": [["blocks_hello", "import gradio as gr\n\ndef welcome(name):\n return f\"Welcome to Gradio, {name}!\"\n\nwith gr.Blocks() as demo:\n gr.Markdown(\n \"\"\"\n # Hello World!\n Start typing below to see the output.\n \"\"\")\n inp = gr.Textbox(placeholder=\"What is your name?\")\n out = gr.Textbox()\n inp.change(welcome, inp, out)\n\nif __name__ == \"__main__\":\n demo.launch()"], ["blocks_kinematics", "import pandas as pd\nimport numpy as np\n\nimport gradio as gr\n\n\ndef plot(v, a):\n g = 9.81\n theta = a / 180 * 3.14\n tmax = ((2 * v) * np.sin(theta)) / g\n timemat = tmax * np.linspace(0, 1, 40)\n\n x = (v * timemat) * np.cos(theta)\n y = ((v * timemat) * np.sin(theta)) - ((0.5 * g) * (timemat**2))\n df = pd.DataFrame({\"x\": x, \"y\": y})\n return df\n\n\ndemo = gr.Blocks()\n\nwith demo:\n gr.Markdown(\n r\"Let's do some kinematics! Choose the speed and angle to see the trajectory. 
Remember that the range $R = v_0^2 \\cdot \\frac{\\sin(2\\theta)}{g}$\"\n )\n\n with gr.Row():\n speed = gr.Slider(1, 30, 25, label=\"Speed\")\n angle = gr.Slider(0, 90, 45, label=\"Angle\")\n output = gr.LinePlot(\n x=\"x\",\n y=\"y\",\n overlay_point=True,\n tooltip=[\"x\", \"y\"],\n x_lim=[0, 100],\n y_lim=[0, 60],\n width=350,\n height=300,\n )\n btn = gr.Button(value=\"Run\")\n btn.click(plot, [speed, angle], output)\n\nif __name__ == \"__main__\":\n demo.launch()\n"]], "guides": [{"name": "key-features", "category": "getting-started", "pretty_category": "Getting Started", "guide_index": 2, "absolute_index": 1, "pretty_name": "Key Features", "content": "# Key Features\n\nLet's go through some of the most popular features of Gradio! Here are Gradio's key features:\n\n1. [Adding example inputs](#example-inputs)\n2. [Passing custom error messages](#alerts)\n3. [Adding descriptive content](#descriptive-content)\n4. [Setting up flagging](#flagging)\n5. [Preprocessing and postprocessing](#preprocessing-and-postprocessing)\n6. [Styling demos](#styling)\n7. [Queuing users](#queuing)\n8. [Iterative outputs](#iterative-outputs)\n9. [Progress bars](#progress-bars)\n10. [Batch functions](#batch-functions)\n11. [Running on collaborative notebooks](#colab-notebooks)\n\n## Example Inputs\n\nYou can provide example data that a user can easily load into `Interface`. This can be helpful to demonstrate the types of inputs the model expects, as well as to provide a way to explore your dataset in conjunction with your model. To load example data, you can provide a **nested list** to the `examples=` keyword argument of the Interface constructor. Each sublist within the outer list represents a data sample, and each element within the sublist represents an input for each input component. The format of example data for each component is specified in the [Docs](https://gradio.app/docs#components).\n\n```python\nimport gradio as gr\n\ndef calculator(num1, operation, num2):\n if operation == \"add\":\n return num1 + num2\n elif operation == \"subtract\":\n return num1 - num2\n elif operation == \"multiply\":\n return num1 * num2\n elif operation == \"divide\":\n if num2 == 0:\n raise gr.Error(\"Cannot divide by zero!\")\n return num1 / num2\n\ndemo = gr.Interface(\n calculator,\n [\n \"number\", \n gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n \"number\"\n ],\n \"number\",\n examples=[\n [5, \"add\", 3],\n [4, \"divide\", 2],\n [-4, \"multiply\", 2.5],\n [0, \"subtract\", 1.2],\n ],\n title=\"Toy Calculator\",\n description=\"Here's a sample toy calculator. Allows you to calculate things like $2+2=4$\",\n)\ndemo.launch()\n\n```\n\n\nYou can load a large dataset into the examples to browse and interact with the dataset through Gradio. The examples will be automatically paginated (you can configure this through the `examples_per_page` argument of `Interface`).\n\nContinue learning about examples in the [More On Examples](https://gradio.app/more-on-examples) guide.\n\n## Alerts\n\nYou wish to pass custom error messages to the user. To do so, raise a `gr.Error(\"custom message\")` to display an error message. If you try to divide by zero in the calculator demo above, a popup modal will display the custom error message. Learn more about Error in the [docs](https://gradio.app/docs#error). \n\nYou can also issue `gr.Warning(\"message\")` and `gr.Info(\"message\")` by having them as standalone lines in your function, which will immediately display modals while continuing the execution of your function. 
Queueing needs to be enabled for this to work. \n\nNote below how the `gr.Error` has to be raised, while the `gr.Warning` and `gr.Info` are single lines.\n\n```python\ndef start_process(name):\n gr.Info(\"Starting process\")\n if name is None:\n gr.Warning(\"Name is empty\")\n ...\n if success == False:\n raise gr.Error(\"Process failed\")\n```\n \n## Descriptive Content\n\nIn the previous example, you may have noticed the `title=` and `description=` keyword arguments in the `Interface` constructor that helps users understand your app.\n\nThere are three arguments in the `Interface` constructor to specify where this content should go:\n\n* `title`: which accepts text and can display it at the very top of interface, and also becomes the page title.\n* `description`: which accepts text, markdown or HTML and places it right under the title.\n* `article`: which also accepts text, markdown or HTML and places it below the interface.\n\n![annotated](https://github.com/gradio-app/gradio/blob/main/guides/assets/annotated.png?raw=true)\n\nIf you're using the `Blocks` API instead, you can insert text, markdown, or HTML anywhere using the `gr.Markdown(...)` or `gr.HTML(...)` components, with descriptive content inside the `Component` constructor.\n\nAnother useful keyword argument is `label=`, which is present in every `Component`. This modifies the label text at the top of each `Component`. You can also add the `info=` keyword argument to form elements like `Textbox` or `Radio` to provide further information on their usage.\n\n```python\ngr.Number(label='Age', info='In years, must be greater than 0')\n```\n\n## Flagging\n\nBy default, an `Interface` will have \"Flag\" button. When a user testing your `Interface` sees input with interesting output, such as erroneous or unexpected model behaviour, they can flag the input for you to review. Within the directory provided by the `flagging_dir=` argument to the `Interface` constructor, a CSV file will log the flagged inputs. If the interface involves file data, such as for Image and Audio components, folders will be created to store those flagged data as well.\n\nFor example, with the calculator interface shown above, we would have the flagged data stored in the flagged directory shown below:\n\n```directory\n+-- calculator.py\n+-- flagged/\n| +-- logs.csv\n```\n\n*flagged/logs.csv*\n\n```csv\nnum1,operation,num2,Output\n5,add,7,12\n6,subtract,1.5,4.5\n```\n\nWith the sepia interface shown earlier, we would have the flagged data stored in the flagged directory shown below:\n\n```directory\n+-- sepia.py\n+-- flagged/\n| +-- logs.csv\n| +-- im/\n| | +-- 0.png\n| | +-- 1.png\n| +-- Output/\n| | +-- 0.png\n| | +-- 1.png\n```\n\n*flagged/logs.csv*\n\n```csv\nim,Output\nim/0.png,Output/0.png\nim/1.png,Output/1.png\n```\n\nIf you wish for the user to provide a reason for flagging, you can pass a list of strings to the `flagging_options` argument of Interface. Users will have to select one of the strings when flagging, which will be saved as an additional column to the CSV.\n\n## Preprocessing and Postprocessing\n\n![](https://github.com/gradio-app/gradio/blob/main/js/_website/src/assets/img/dataflow.svg?raw=true)\n\nAs you've seen, Gradio includes components that can handle a variety of different data types, such as images, audio, and video. 
Most components can be used both as inputs or outputs.\n\nWhen a component is used as an input, Gradio automatically handles the *preprocessing* needed to convert the data from a type sent by the user's browser (such as a base64 representation of a webcam snapshot) to a form that can be accepted by your function (such as a `numpy` array).\n\nSimilarly, when a component is used as an output, Gradio automatically handles the *postprocessing* needed to convert the data from what is returned by your function (such as a list of image paths) to a form that can be displayed in the user's browser (such as a `Gallery` of images in base64 format).\n\nYou can control the *preprocessing* using the parameters when constructing the image component. For example, here if you instantiate the `Image` component with the following parameters, it will convert the image to the `PIL` type and reshape it to be `(100, 100)` no matter the original size that it was submitted as:\n\n```py\nimg = gr.Image(shape=(100, 100), type=\"pil\")\n```\n\nIn contrast, here we keep the original size of the image, but invert the colors before converting it to a numpy array:\n\n```py\nimg = gr.Image(invert_colors=True, type=\"numpy\")\n```\n\nPostprocessing is a lot easier! Gradio automatically recognizes the format of the returned data (e.g. is the `Image` a `numpy` array or a `str` filepath?) and postprocesses it into a format that can be displayed by the browser.\n\nTake a look at the [Docs](https://gradio.app/docs) to see all the preprocessing-related parameters for each Component.\n\n## Styling\n\nGradio themes are the easiest way to customize the look and feel of your app. You can choose from a variety of themes, or create your own. To do so, pass the `theme=` kwarg to the `Interface` constructor. For example:\n\n```python\ndemo = gr.Interface(..., theme=gr.themes.Monochrome())\n```\n\nGradio comes with a set of prebuilt themes which you can load from `gr.themes.*`. You can extend these themes or create your own themes from scratch - see the [Theming guide](https://gradio.app/guides/theming-guide) for more details.\n\nFor additional styling ability, you can pass any CSS to your app using the `css=` kwarg.\nThe base class for the Gradio app is `gradio-container`, so here's an example that changes the background color of the Gradio app:\n\n```python\nwith gr.Interface(css=\".gradio-container {background-color: red}\") as demo:\n ...\n```\n\nSome components can be additionally styled through the `style()` method. For example:\n\n```python\nimg = gr.Image(\"lion.jpg\").style(height='24', rounded=False)\n```\n\nTake a look at the [Docs](https://gradio.app/docs) to see all the styling options for each Component.\n\n## Queuing\n\nIf your app expects heavy traffic, use the `queue()` method to control processing rate. This will queue up calls so only a certain number of requests are processed at a single time. 
Queueing uses websockets, which also prevent network timeouts, so you should use queueing if the inference time of your function is long (> 1min).\n\nWith `Interface`:\n\n```python\ndemo = gr.Interface(...).queue()\ndemo.launch()\n```\n\nWith `Blocks`:\n\n```python\nwith gr.Blocks() as demo:\n #...\ndemo.queue()\ndemo.launch()\n```\n\nYou can control the number of requests processed at a single time as such:\n\n```python\ndemo.queue(concurrency_count=3)\n```\n\nSee the [Docs on queueing](/docs/#queue) on configuring other queuing parameters.\n\nTo specify only certain functions for queueing in Blocks:\n\n```python\nwith gr.Blocks() as demo2:\n num1 = gr.Number()\n num2 = gr.Number()\n output = gr.Number()\n gr.Button(\"Add\").click(\n lambda a, b: a + b, [num1, num2], output)\n gr.Button(\"Multiply\").click(\n lambda a, b: a * b, [num1, num2], output, queue=True)\ndemo2.launch()\n```\n\n## Iterative Outputs\n\nIn some cases, you may want to stream a sequence of outputs rather than show a single output at once. For example, you might have an image generation model and you want to show the image that is generated at each step, leading up to the final image. Or you might have a chatbot which streams its response one word at a time instead of returning it all at once.\n\nIn such cases, you can supply a **generator** function into Gradio instead of a regular function. Creating generators in Python is very simple: instead of a single `return` value, a function should `yield` a series of values instead. Usually the `yield` statement is put in some kind of loop. Here's an example of an generator that simply counts up to a given number:\n\n```python\ndef my_generator(x):\n for i in range(x):\n yield i\n```\n\nYou supply a generator into Gradio the same way as you would a regular function. For example, here's a a (fake) image generation model that generates noise for several steps before outputting an image:\n\n```python\nimport gradio as gr\nimport numpy as np\nimport time\n\n# define core fn, which returns a generator {steps} times before returning the image\ndef fake_diffusion(steps):\n for _ in range(steps):\n time.sleep(1)\n image = np.random.random((600, 600, 3))\n yield image\n image = \"https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg\"\n yield image\n\n\ndemo = gr.Interface(fake_diffusion, inputs=gr.Slider(1, 10, 3), outputs=\"image\")\n\n# define queue - required for generators\ndemo.queue()\n\ndemo.launch()\n\n```\n\n\nNote that we've added a `time.sleep(1)` in the iterator to create an artificial pause between steps so that you are able to observe the steps of the iterator (in a real image generation model, this probably wouldn't be necessary).\n\nSupplying a generator into Gradio **requires** you to enable queuing in the underlying Interface or Blocks (see the queuing section above).\n\n## Progress Bars\n\nGradio supports the ability to create a custom Progress Bars so that you have customizability and control over the progress update that you show to the user. In order to enable this, simply add an argument to your method that has a default value of a `gr.Progress` instance. Then you can update the progress levels by calling this instance directly with a float between 0 and 1, or using the `tqdm()` method of the `Progress` instance to track progress over an iterable, as shown below. 
Queueing must be enabled for progress updates.\n\n```python\nimport gradio as gr\nimport time\n\ndef slowly_reverse(word, progress=gr.Progress()):\n progress(0, desc=\"Starting\")\n time.sleep(1)\n progress(0.05)\n new_string = \"\"\n for letter in progress.tqdm(word, desc=\"Reversing\"):\n time.sleep(0.25)\n new_string = letter + new_string\n return new_string\n\ndemo = gr.Interface(slowly_reverse, gr.Text(), gr.Text())\n\nif __name__ == \"__main__\":\n demo.queue(concurrency_count=10).launch()\n\n```\n\n\nIf you use the `tqdm` library, you can even report progress updates automatically from any `tqdm.tqdm` that already exists within your function by setting the default argument as `gr.Progress(track_tqdm=True)`!\n\n## Batch Functions\n\nGradio supports the ability to pass *batch* functions. Batch functions are just\nfunctions which take in a list of inputs and return a list of predictions.\n\nFor example, here is a batched function that takes in two lists of inputs (a list of\nwords and a list of ints), and returns a list of trimmed words as output:\n\n```py\nimport time\n\ndef trim_words(words, lens):\n trimmed_words = []\n time.sleep(5)\n for w, l in zip(words, lens):\n trimmed_words.append(w[:int(l)]) \n return [trimmed_words]\n```\n\nThe advantage of using batched functions is that if you enable queuing, the Gradio\nserver can automatically *batch* incoming requests and process them in parallel,\npotentially speeding up your demo. Here's what the Gradio code looks like (notice\nthe `batch=True` and `max_batch_size=16` -- both of these parameters can be passed\ninto event triggers or into the `Interface` class)\n\nWith `Interface`:\n\n```python\ndemo = gr.Interface(trim_words, [\"textbox\", \"number\"], [\"output\"], \n batch=True, max_batch_size=16)\ndemo.queue()\ndemo.launch()\n```\n\nWith `Blocks`:\n\n```py\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n with gr.Row():\n word = gr.Textbox(label=\"word\")\n leng = gr.Number(label=\"leng\")\n output = gr.Textbox(label=\"Output\")\n with gr.Row():\n run = gr.Button()\n\n event = run.click(trim_words, [word, leng], output, batch=True, max_batch_size=16)\n\ndemo.queue()\ndemo.launch()\n```\n\nIn the example above, 16 requests could be processed in parallel (for a total inference\ntime of 5 seconds), instead of each request being processed separately (for a total\ninference time of 80 seconds). Many Hugging Face `transformers` and `diffusers` models\nwork very naturally with Gradio's batch mode: here's [an example demo using diffusers to\ngenerate images in batches](https://github.com/gradio-app/gradio/blob/main/demo/diffusers_with_batching/run.py)\n\nNote: using batch functions with Gradio **requires** you to enable queuing in the underlying Interface or Blocks (see the queuing section above).\n\n\n## Colab Notebooks\n\n\nGradio is able to run anywhere you run Python, including local jupyter notebooks as well as collaborative notebooks, such as [Google Colab](https://colab.research.google.com/). In the case of local jupyter notebooks and Google Colab notbooks, Gradio runs on a local server which you can interact with in your browser. (Note: for Google Colab, this is accomplished by [service worker tunneling](https://github.com/tensorflow/tensorboard/blob/master/docs/design/colab_integration.md), which requires cookies to be enabled in your browser.) 
For other remote notebooks, Gradio will also run on a server, but you will need to use [SSH tunneling](https://coderwall.com/p/ohk6cg/remote-access-to-ipython-notebooks-via-ssh) to view the app in your local browser. Often a simpler option is to use Gradio's built-in public links, [discussed in the next Guide](https://gradio.app/guides/sharing-your-app/#sharing-demos). ", "html": "

Key Features

\n\n

Let's go through some of the most popular features of Gradio! Here are Gradio's key features:

\n\n
  1. Adding example inputs
  2. Passing custom error messages
  3. Adding descriptive content
  4. Setting up flagging
  5. Preprocessing and postprocessing
  6. Styling demos
  7. Queuing users
  8. Iterative outputs
  9. Progress bars
  10. Batch functions
  11. Running on collaborative notebooks
\n\n

Example Inputs

\n\n

You can provide example data that a user can easily load into Interface. This can be helpful to demonstrate the types of inputs the model expects, as well as to provide a way to explore your dataset in conjunction with your model. To load example data, you can provide a nested list to the examples= keyword argument of the Interface constructor. Each sublist within the outer list represents a data sample, and each element within the sublist represents an input for each input component. The format of example data for each component is specified in the Docs.

\n\n
import gradio as gr\n\ndef calculator(num1, operation, num2):\n    if operation == \"add\":\n        return num1 + num2\n    elif operation == \"subtract\":\n        return num1 - num2\n    elif operation == \"multiply\":\n        return num1 * num2\n    elif operation == \"divide\":\n        if num2 == 0:\n            raise gr.Error(\"Cannot divide by zero!\")\n        return num1 / num2\n\ndemo = gr.Interface(\n    calculator,\n    [\n        \"number\", \n        gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n        \"number\"\n    ],\n    \"number\",\n    examples=[\n        [5, \"add\", 3],\n        [4, \"divide\", 2],\n        [-4, \"multiply\", 2.5],\n        [0, \"subtract\", 1.2],\n    ],\n    title=\"Toy Calculator\",\n    description=\"Here's a sample toy calculator. Allows you to calculate things like $2+2=4$\",\n)\ndemo.launch()\n\n
\n\n

\n\n

You can load a large dataset into the examples to browse and interact with the dataset through Gradio. The examples will be automatically paginated (you can configure this through the examples_per_page argument of Interface).

\n\n
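
For instance, a minimal sketch (assuming import gradio as gr and the calculator function from the example above; the page size here is just illustrative) that paginates the example table:

\n\n
# examples_per_page controls how many example rows are shown per page (here, 2 at a time)\ndemo = gr.Interface(calculator, [\"number\", gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]), \"number\"], \"number\", examples=[[5, \"add\", 3], [4, \"divide\", 2], [-4, \"multiply\", 2.5], [0, \"subtract\", 1.2]], examples_per_page=2)\n
\n\n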

Continue learning about examples in the More On Examples guide.

\n\n

Alerts

\n\n

You may wish to pass custom error messages to the user. To do so, raise a gr.Error(\"custom message\") to display an error message. If you try to divide by zero in the calculator demo above, a popup modal will display the custom error message. Learn more about Error in the docs.

\n\n

You can also issue gr.Warning(\"message\") and gr.Info(\"message\") by having them as standalone lines in your function, which will immediately display modals while continuing the execution of your function. Queueing needs to be enabled for this to work.

\n\n

Note below how the gr.Error has to be raised, while the gr.Warning and gr.Info are single lines.

\n\n
def start_process(name):\n    gr.Info(\"Starting process\")\n    if name is None:\n        gr.Warning(\"Name is empty\")\n    ...\n    if success == False:\n        raise gr.Error(\"Process failed\")\n
\n\n

Descriptive Content

\n\n

In the previous example, you may have noticed the title= and description= keyword arguments in the Interface constructor that help users understand your app.

\n\n

There are three arguments in the Interface constructor to specify where this content should go:

\n\n
  • title: which accepts text and can display it at the very top of the interface, and also becomes the page title.
  • description: which accepts text, markdown or HTML and places it right under the title.
  • article: which also accepts text, markdown or HTML and places it below the interface.
\n\n

\"annotated\"

\n\n

If you're using the Blocks API instead, you can insert text, markdown, or HTML anywhere using the gr.Markdown(...) or gr.HTML(...) components, with descriptive content inside the Component constructor.

\n\n

Another useful keyword argument is label=, which is present in every Component. This modifies the label text at the top of each Component. You can also add the info= keyword argument to form elements like Textbox or Radio to provide further information on their usage.

\n\n
gr.Number(label='Age', info='In years, must be greater than 0')\n
\n\n

Flagging

\n\n

By default, an Interface will have a \"Flag\" button. When a user testing your Interface sees input with interesting output, such as erroneous or unexpected model behaviour, they can flag the input for you to review. Within the directory provided by the flagging_dir= argument to the Interface constructor, a CSV file will log the flagged inputs. If the interface involves file data, such as for Image and Audio components, folders will be created to store that flagged data as well.

\n\n

For example, with the calculator interface shown above, we would have the flagged data stored in the flagged directory shown below:

\n\n
+-- calculator.py\n+-- flagged/\n|   +-- logs.csv\n
\n\n

flagged/logs.csv

\n\n
num1,operation,num2,Output\n5,add,7,12\n6,subtract,1.5,4.5\n
\n\n

With the sepia interface shown earlier, we would have the flagged data stored in the flagged directory shown below:

\n\n
+-- sepia.py\n+-- flagged/\n|   +-- logs.csv\n|   +-- im/\n|   |   +-- 0.png\n|   |   +-- 1.png\n|   +-- Output/\n|   |   +-- 0.png\n|   |   +-- 1.png\n
\n\n

flagged/logs.csv

\n\n
im,Output\nim/0.png,Output/0.png\nim/1.png,Output/1.png\n
\n\n

If you wish for the user to provide a reason for flagging, you can pass a list of strings to the flagging_options argument of Interface. Users will have to select one of the strings when flagging, which will be saved as an additional column to the CSV.

\n\n
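
For example, a brief sketch (assuming import gradio as gr and the calculator function defined earlier; the option strings are illustrative):

\n\n
# Users must pick one of these reasons when they click \"Flag\"; the choice is saved as an extra column in logs.csv\ndemo = gr.Interface(calculator, [\"number\", gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]), \"number\"], \"number\", flagging_options=[\"wrong result\", \"unexpected behaviour\", \"other\"])\n
\n\n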

Preprocessing and Postprocessing

\n\n

\"\"

\n\n

As you've seen, Gradio includes components that can handle a variety of different data types, such as images, audio, and video. Most components can be used as both inputs and outputs.

\n\n

When a component is used as an input, Gradio automatically handles the preprocessing needed to convert the data from a type sent by the user's browser (such as a base64 representation of a webcam snapshot) to a form that can be accepted by your function (such as a numpy array).

\n\n

Similarly, when a component is used as an output, Gradio automatically handles the postprocessing needed to convert the data from what is returned by your function (such as a list of image paths) to a form that can be displayed in the user's browser (such as a Gallery of images in base64 format).

\n\n

You can control the preprocessing using the parameters when constructing the image component. For example, here if you instantiate the Image component with the following parameters, it will convert the image to the PIL type and reshape it to be (100, 100) no matter the original size that it was submitted as:

\n\n
img = gr.Image(shape=(100, 100), type=\"pil\")\n
\n\n

In contrast, here we keep the original size of the image, but invert the colors before converting it to a numpy array:

\n\n
img = gr.Image(invert_colors=True, type=\"numpy\")\n
\n\n

Postprocessing is a lot easier! Gradio automatically recognizes the format of the returned data (e.g. is the Image a numpy array or a str filepath?) and postprocesses it into a format that can be displayed by the browser.

\n\n

Take a look at the Docs to see all the preprocessing-related parameters for each Component.

\n\n

Styling

\n\n

Gradio themes are the easiest way to customize the look and feel of your app. You can choose from a variety of themes, or create your own. To do so, pass the theme= kwarg to the Interface constructor. For example:

\n\n
demo = gr.Interface(..., theme=gr.themes.Monochrome())\n
\n\n

Gradio comes with a set of prebuilt themes which you can load from gr.themes.*. You can extend these themes or create your own themes from scratch - see the Theming guide for more details.

\n\n
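
As a rough sketch, a prebuilt theme can also be lightly customized before being passed to theme= (the hue chosen here is just an example):

\n\n
import gradio as gr\n\n# Start from the prebuilt Soft theme and override its primary hue\ntheme = gr.themes.Soft(primary_hue=\"emerald\")\ndemo = gr.Interface(lambda name: f\"Hello {name}!\", \"text\", \"text\", theme=theme)\n
\n\n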

For additional styling ability, you can pass any CSS to your app using the css= kwarg.\nThe base class for the Gradio app is gradio-container, so here's an example that changes the background color of the Gradio app:

\n\n
demo = gr.Interface(..., css=\".gradio-container {background-color: red}\")\n
\n\n

Some components can be additionally styled through the style() method. For example:

\n\n
img = gr.Image(\"lion.jpg\").style(height='24', rounded=False)\n
\n\n

Take a look at the Docs to see all the styling options for each Component.

\n\n

Queuing

\n\n

If your app expects heavy traffic, use the queue() method to control processing rate. This will queue up calls so only a certain number of requests are processed at a single time. Queueing uses websockets, which also prevent network timeouts, so you should use queueing if the inference time of your function is long (> 1min).

\n\n

With Interface:

\n\n
demo = gr.Interface(...).queue()\ndemo.launch()\n
\n\n

With Blocks:

\n\n
with gr.Blocks() as demo:\n    #...\ndemo.queue()\ndemo.launch()\n
\n\n

You can control the number of requests processed at a single time as such:

\n\n
demo.queue(concurrency_count=3)\n
\n\n

See the Docs on queueing for more on configuring other queuing parameters.

\n\n
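
For instance, one commonly used parameter is max_size, which caps how many requests may wait in the queue at once (a sketch reusing the demo from above; the numbers are illustrative):

\n\n
# At most 3 requests are processed concurrently, and at most 20 may wait in the queue\ndemo.queue(concurrency_count=3, max_size=20)\ndemo.launch()\n
\n\n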

To specify only certain functions for queueing in Blocks:

\n\n
with gr.Blocks() as demo2:\n    num1 = gr.Number()\n    num2 = gr.Number()\n    output = gr.Number()\n    gr.Button(\"Add\").click(\n        lambda a, b: a + b, [num1, num2], output)\n    gr.Button(\"Multiply\").click(\n        lambda a, b: a * b, [num1, num2], output, queue=True)\ndemo2.launch()\n
\n\n

Iterative Outputs

\n\n

In some cases, you may want to stream a sequence of outputs rather than show a single output at once. For example, you might have an image generation model and you want to show the image that is generated at each step, leading up to the final image. Or you might have a chatbot which streams its response one word at a time instead of returning it all at once.

\n\n

In such cases, you can supply a generator function into Gradio instead of a regular function. Creating generators in Python is very simple: instead of a single return value, a function should yield a series of values. Usually the yield statement is put in some kind of loop. Here's an example of a generator that simply counts up to a given number:

\n\n
def my_generator(x):\n    for i in range(x):\n        yield i\n
\n\n

You supply a generator into Gradio the same way as you would a regular function. For example, here's a (fake) image generation model that generates noise for several steps before outputting an image:

\n\n
import gradio as gr\nimport numpy as np\nimport time\n\n# define core fn, which returns a generator {steps} times before returning the image\ndef fake_diffusion(steps):\n    for _ in range(steps):\n        time.sleep(1)\n        image = np.random.random((600, 600, 3))\n        yield image\n    image = \"https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg\"\n    yield image\n\n\ndemo = gr.Interface(fake_diffusion, inputs=gr.Slider(1, 10, 3), outputs=\"image\")\n\n# define queue - required for generators\ndemo.queue()\n\ndemo.launch()\n\n
\n\n

\n\n

Note that we've added a time.sleep(1) in the iterator to create an artificial pause between steps so that you are able to observe the steps of the iterator (in a real image generation model, this probably wouldn't be necessary).

\n\n

Supplying a generator into Gradio requires you to enable queuing in the underlying Interface or Blocks (see the queuing section above).

\n\n

Progress Bars

\n\n

Gradio supports custom progress bars, giving you control over the progress updates shown to the user. To enable this, simply add an argument to your function that has a default value of a gr.Progress instance. You can then update the progress level by calling this instance directly with a float between 0 and 1, or by using the tqdm() method of the Progress instance to track progress over an iterable, as shown below. Queueing must be enabled for progress updates.

\n\n
import gradio as gr\nimport time\n\ndef slowly_reverse(word, progress=gr.Progress()):\n    progress(0, desc=\"Starting\")\n    time.sleep(1)\n    progress(0.05)\n    new_string = \"\"\n    for letter in progress.tqdm(word, desc=\"Reversing\"):\n        time.sleep(0.25)\n        new_string = letter + new_string\n    return new_string\n\ndemo = gr.Interface(slowly_reverse, gr.Text(), gr.Text())\n\nif __name__ == \"__main__\":\n    demo.queue(concurrency_count=10).launch()\n\n
\n\n

\n\n

If you use the tqdm library, you can even report progress updates automatically from any tqdm.tqdm that already exists within your function by setting the default argument as gr.Progress(track_tqdm=True)!

\n\n
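
A minimal sketch of that pattern (the function body is illustrative, not from the guide):

\n\n
import gradio as gr\nimport time\nfrom tqdm import tqdm\n\n# track_tqdm=True mirrors any tqdm loop inside the function onto the Gradio progress bar\ndef slowly_reverse(word, progress=gr.Progress(track_tqdm=True)):\n    new_string = \"\"\n    for letter in tqdm(word, desc=\"Reversing\"):\n        time.sleep(0.25)\n        new_string = letter + new_string\n    return new_string\n\ndemo = gr.Interface(slowly_reverse, gr.Text(), gr.Text())\ndemo.queue()  # queuing is required for progress updates\n
\n\n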

Batch Functions

\n\n

Gradio supports the ability to pass batch functions. Batch functions are just\nfunctions which take in a list of inputs and return a list of predictions.

\n\n

For example, here is a batched function that takes in two lists of inputs (a list of\nwords and a list of ints), and returns a list of trimmed words as output:

\n\n
import time\n\ndef trim_words(words, lens):\n    trimmed_words = []\n    time.sleep(5)\n    for w, l in zip(words, lens):\n        trimmed_words.append(w[:int(l)])        \n    return [trimmed_words]\n
\n\n

The advantage of using batched functions is that if you enable queuing, the Gradio\nserver can automatically batch incoming requests and process them in parallel,\npotentially speeding up your demo. Here's what the Gradio code looks like (notice\nthe batch=True and max_batch_size=16 -- both of these parameters can be passed\ninto event triggers or into the Interface class)

\n\n

With Interface:

\n\n
demo = gr.Interface(trim_words, [\"textbox\", \"number\"], [\"output\"], \n                    batch=True, max_batch_size=16)\ndemo.queue()\ndemo.launch()\n
\n\n

With Blocks:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        word = gr.Textbox(label=\"word\")\n        leng = gr.Number(label=\"leng\")\n        output = gr.Textbox(label=\"Output\")\n    with gr.Row():\n        run = gr.Button()\n\n    event = run.click(trim_words, [word, leng], output, batch=True, max_batch_size=16)\n\ndemo.queue()\ndemo.launch()\n
\n\n

In the example above, 16 requests could be processed in parallel (for a total inference\ntime of 5 seconds), instead of each request being processed separately (for a total\ninference time of 80 seconds). Many Hugging Face transformers and diffusers models\nwork very naturally with Gradio's batch mode: here's an example demo using diffusers to\ngenerate images in batches

\n\n

Note: using batch functions with Gradio requires you to enable queuing in the underlying Interface or Blocks (see the queuing section above).

\n\n

Colab Notebooks

\n\n

Gradio is able to run anywhere you run Python, including local Jupyter notebooks as well as collaborative notebooks, such as Google Colab. In the case of local Jupyter notebooks and Google Colab notebooks, Gradio runs on a local server which you can interact with in your browser. (Note: for Google Colab, this is accomplished by service worker tunneling, which requires cookies to be enabled in your browser.) For other remote notebooks, Gradio will also run on a server, but you will need to use SSH tunneling to view the app in your local browser. Often a simpler option is to use Gradio's built-in public links, discussed in the next Guide.

\n", "tags": [], "spaces": [], "url": "/guides/key-features/", "contributor": null}], "preprocessing": "this component does *not* accept input.", "postprocessing": "expects a valid str that can be rendered as Markdown.", "parent": "gradio", "prev_obj": "LinePlot", "next_obj": "Model3D"}, "model3d": {"class": null, "name": "Model3D", "description": "Component allows users to upload or view 3D Model files (.obj, .glb, or .gltf).
", "tags": {"preprocessing": "This component passes the uploaded file as a {str}filepath.", "postprocessing": "expects function to return a {str} or {pathlib.Path} filepath of type (.obj, glb, or .gltf)", "demos": "model3D", "guides": "how-to-use-3D-model-component"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "value", "annotation": "str | Callable | None", "doc": "path to (.obj, glb, or .gltf) file to show in model3D viewer. If callable, the function will be called whenever the app loads to set the initial value of the component.", "default": "None"}, {"name": "clear_color", "annotation": "list[float] | None", "doc": "background color of scene", "default": "None"}, {"name": "label", "annotation": "str | None", "doc": "component name in interface.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", "default": "None"}, {"name": "show_label", "annotation": "bool | None", "doc": "if True, will display label.", "default": "None"}, {"name": "container", "annotation": "bool", "doc": "If True, will place the component in a container - providing some extra padding around the border.", "default": "True"}, {"name": "scale", "annotation": "int | None", "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", "default": "None"}, {"name": "min_width", "annotation": "int", "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", "default": "160"}, {"name": "visible", "annotation": "bool", "doc": "If False, component will be hidden.", "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "elem_classes", "annotation": "list[str] | str | None", "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}], "returns": {"annotation": null}, "example": null, "fns": [{"fn": null, "name": "change", "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. 
If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Model3D"}, {"fn": null, "name": "edit", "description": "This listener is triggered when the user edits the component (e.g. image) using the built-in editor. 
This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. 
Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Model3D"}, {"fn": null, "name": "clear", "description": "This listener is triggered when the user clears the component (e.g. image or audio) using the X button for the component. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Model3D"}, {"fn": null, "name": "upload", "description": "This listener is triggered when the user uploads a file into the component (e.g. when the user uploads a video into a video component). This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Model3D"}], "string_shortcuts": [["Model3D", "model3d", "Uses default values"]], "demos": [["model3D", "import gradio as gr\nimport os\n\n\ndef load_mesh(mesh_file_name):\n return mesh_file_name\n\n\ndemo = gr.Interface(\n fn=load_mesh,\n inputs=gr.Model3D(),\n outputs=gr.Model3D(\n clear_color=[0.0, 0.0, 0.0, 0.0], label=\"3D Model\"),\n examples=[\n [os.path.join(os.path.dirname(__file__), \"files/Bunny.obj\")],\n [os.path.join(os.path.dirname(__file__), \"files/Duck.glb\")],\n [os.path.join(os.path.dirname(__file__), \"files/Fox.gltf\")],\n [os.path.join(os.path.dirname(__file__), \"files/face.obj\")],\n ],\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n"]], "guides": [{"name": "how-to-use-3D-model-component", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 38, "pretty_name": "How To Use 3D Model Component", "content": "# How to Use the 3D Model Component\n\n\n\n\n## Introduction\n\n3D models are becoming more popular in machine learning and make for some of the most fun demos to experiment with. Using `gradio`, you can easily build a demo of your 3D image model and share it with anyone. The Gradio 3D Model component accepts 3 file types including: *.obj*, *.glb*, & *.gltf*.\n\nThis guide will show you how to build a demo for your 3D image model in a few lines of code; like the one below. Play around with 3D object by clicking around, dragging and zooming:\n\n \n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](https://gradio.app/guides/quickstart).\n\n\n## Taking a Look at the Code\n\nLet's take a look at how to create the minimal interface above. The prediction function in this case will just return the original 3D model mesh, but you can change this function to run inference on your machine learning model. 
We'll take a look at more complex examples below.\n\n```python\nimport gradio as gr\n\ndef load_mesh(mesh_file_name):\n return mesh_file_name\n\ndemo = gr.Interface(\n fn=load_mesh,\n inputs=gr.Model3D(),\n outputs=gr.Model3D(clear_color=[0.0, 0.0, 0.0, 0.0], label=\"3D Model\"),\n examples=[\n [\"files/Bunny.obj\"],\n [\"files/Duck.glb\"],\n [\"files/Fox.gltf\"],\n [\"files/face.obj\"],\n ],\n cache_examples=True,\n)\n\ndemo.launch()\n```\n\nLet's break down the code above:\n\n`load_mesh`: This is our 'prediction' function and for simplicity, this function will take in the 3D model mesh and return it.\n\nCreating the Interface:\n\n* `fn`: the prediction function that is used when the user clicks submit. In our case this is the `load_mesh` function.\n* `inputs`: create a model3D input component. The input expects an uploaded file as a {str} filepath.\n* `outputs`: create a model3D output component. The output component also expects a file as a {str} filepath.\n * `clear_color`: this is the background color of the 3D model canvas. Expects RGBa values.\n * `label`: the label that appears on the top left of the component.\n* `examples`: list of 3D model files. The 3D model component can accept *.obj*, *.glb*, & *.gltf* file types.\n* `cache_examples`: saves the predicted output for the examples, to save time on inference.\n\n\n## Exploring mode complex Model3D Demos:\n\nBelow is a demo that uses the DPT model to predict the depth of an image and then uses 3D Point Cloud to create a 3D object. Take a look at the [app.py](https://huggingface.co/spaces/radames/dpt-depth-estimation-3d-obj/blob/main/app.py) file for a peek into the code and the model prediction function.\n \n\nBelow is a demo that uses the PIFu model to convert an image of a clothed human into a 3D digitized model. Take a look at the [spaces.py](https://huggingface.co/spaces/radames/PIFu-Clothed-Human-Digitization/blob/main/PIFu/spaces.py) file for a peek into the code and the model prediction function.\n\n \n\n----------\n\nAnd you're done! That's all the code you need to build an interface for your Model3D model. Here are some references that you may find useful:\n\n* Gradio's [\"Getting Started\" guide](https://gradio.app/getting_started/)\n* The first [3D Model Demo](https://huggingface.co/spaces/dawood/Model3D) and [complete code](https://huggingface.co/spaces/dawood/Model3D/tree/main) (on Hugging Face Spaces)\n", "html": "

\n\n\n", "tags": ["VISION", "IMAGE"], "spaces": ["https://huggingface.co/spaces/dawood/Model3D", "https://huggingface.co/spaces/radames/PIFu-Clothed-Human-Digitization", "https://huggingface.co/spaces/radames/dpt-depth-estimation-3d-obj"], "url": "/guides/how-to-use-3D-model-component/", "contributor": null}], "preprocessing": "This component passes the uploaded file as a strfilepath.", "postprocessing": "expects function to return a str or pathlib.Path filepath of type (.obj, glb, or .gltf)", "parent": "gradio", "prev_obj": "Markdown", "next_obj": "Number"}, "number": {"class": null, "name": "Number", "description": "Creates a numeric field for user to enter numbers as input or display numeric output.
", "tags": {"preprocessing": "passes field value as a {float} or {int} into the function, depending on `precision`.", "postprocessing": "expects an {int} or {float} returned from the function and sets field value to it.", "examples-format": "a {float} or {int} representing the number's value.", "demos": "tax_calculator, titanic_survival, blocks_simple_squares"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "value", "annotation": "float | Callable | None", "doc": "default value. If callable, the function will be called whenever the app loads to set the initial value of the component.", "default": "None"}, {"name": "label", "annotation": "str | None", "doc": "component name in interface.", "default": "None"}, {"name": "info", "annotation": "str | None", "doc": "additional component description.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", "default": "None"}, {"name": "show_label", "annotation": "bool | None", "doc": "if True, will display label.", "default": "None"}, {"name": "container", "annotation": "bool", "doc": "If True, will place the component in a container - providing some extra padding around the border.", "default": "True"}, {"name": "scale", "annotation": "int | None", "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", "default": "None"}, {"name": "min_width", "annotation": "int", "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", "default": "160"}, {"name": "interactive", "annotation": "bool | None", "doc": "if True, will be editable; if False, editing will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.", "default": "None"}, {"name": "visible", "annotation": "bool", "doc": "If False, component will be hidden.", "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "elem_classes", "annotation": "list[str] | str | None", "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "precision", "annotation": "int | None", "doc": "Precision to round input/output to. If set to 0, will round to nearest integer and convert type to int. If None, no rounding happens.", "default": "None"}, {"name": "minimum", "annotation": "float | None", "doc": "Minimum value. Only applied when component is used as an input. If a user provides a smaller value, a gr.Error exception is raised by the backend.", "default": "None"}, {"name": "maximum", "annotation": "float | None", "doc": "Maximum value. Only applied when component is used as an input. If a user provides a larger value, a gr.Error exception is raised by the backend.", "default": "None"}, {"name": "step", "annotation": "float", "doc": "The interval between allowed numbers in the component. 
Can be used along with optional parameters `minimum` and `maximum` to create a range of legal values starting from `minimum` and incrementing according to this parameter.", "default": "1"}], "returns": {"annotation": null}, "example": null, "fns": [{"fn": null, "name": "change", "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Number"}, {"fn": null, "name": "input", "description": "This listener is triggered when the user changes the value of the component. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Number"}, {"fn": null, "name": "submit", "description": "This listener is triggered when the user presses the Enter key while the component (e.g. a textbox) is focused. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Number"}, {"fn": null, "name": "focus", "description": "This listener is triggered when the component is focused (e.g. when the user clicks inside a textbox). This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. 
If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Number"}, {"fn": null, "name": "blur", "description": "This listener is triggered when the component's is unfocused/blurred (e.g. when the user clicks outside of a textbox). 
This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. 
Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Number"}], "string_shortcuts": [["Number", "number", "Uses default values"]], "demos": [["tax_calculator", "import gradio as gr\n\ndef tax_calculator(income, marital_status, assets):\n tax_brackets = [(10, 0), (25, 8), (60, 12), (120, 20), (250, 30)]\n total_deductible = sum(assets[\"Cost\"])\n taxable_income = income - total_deductible\n\n total_tax = 0\n for bracket, rate in tax_brackets:\n if taxable_income > bracket:\n total_tax += (taxable_income - bracket) * rate / 100\n\n if marital_status == \"Married\":\n total_tax *= 0.75\n elif marital_status == \"Divorced\":\n total_tax *= 0.8\n\n return round(total_tax)\n\ndemo = gr.Interface(\n tax_calculator,\n [\n \"number\",\n gr.Radio([\"Single\", \"Married\", \"Divorced\"]),\n gr.Dataframe(\n headers=[\"Item\", \"Cost\"],\n datatype=[\"str\", \"number\"],\n label=\"Assets Purchased this Year\",\n ),\n ],\n \"number\",\n examples=[\n [10000, \"Married\", [[\"Suit\", 5000], [\"Laptop\", 800], [\"Car\", 1800]]],\n [80000, \"Single\", [[\"Suit\", 800], [\"Watch\", 1800], [\"Car\", 800]]],\n ],\n)\n\ndemo.launch()\n"], ["titanic_survival", "import os\n\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\n\nimport gradio as gr\n\ncurrent_dir = os.path.dirname(os.path.realpath(__file__))\ndata = pd.read_csv(os.path.join(current_dir, \"files/titanic.csv\"))\n\n\ndef encode_age(df):\n df.Age = df.Age.fillna(-0.5)\n bins = (-1, 0, 5, 12, 18, 25, 35, 60, 120)\n categories = pd.cut(df.Age, bins, labels=False)\n df.Age = categories\n return df\n\n\ndef encode_fare(df):\n df.Fare = df.Fare.fillna(-0.5)\n bins = (-1, 0, 8, 15, 31, 1000)\n categories = pd.cut(df.Fare, bins, labels=False)\n df.Fare = categories\n return df\n\n\ndef encode_df(df):\n df = encode_age(df)\n df = encode_fare(df)\n sex_mapping = {\"male\": 0, \"female\": 1}\n df = df.replace({\"Sex\": sex_mapping})\n embark_mapping = {\"S\": 1, \"C\": 2, \"Q\": 3}\n df = df.replace({\"Embarked\": embark_mapping})\n df.Embarked = df.Embarked.fillna(0)\n df[\"Company\"] = 0\n df.loc[(df[\"SibSp\"] > 0), \"Company\"] = 1\n df.loc[(df[\"Parch\"] > 0), \"Company\"] = 2\n df.loc[(df[\"SibSp\"] > 0) & (df[\"Parch\"] > 0), \"Company\"] = 3\n df = df[\n [\n \"PassengerId\",\n \"Pclass\",\n \"Sex\",\n \"Age\",\n \"Fare\",\n \"Embarked\",\n \"Company\",\n \"Survived\",\n ]\n ]\n return df\n\n\ntrain = encode_df(data)\n\nX_all = train.drop([\"Survived\", \"PassengerId\"], axis=1)\ny_all = train[\"Survived\"]\n\nnum_test = 0.20\nX_train, X_test, y_train, y_test = train_test_split(\n X_all, y_all, test_size=num_test, random_state=23\n)\n\nclf = RandomForestClassifier()\nclf.fit(X_train, y_train)\npredictions = clf.predict(X_test)\n\n\ndef predict_survival(passenger_class, is_male, age, company, fare, embark_point):\n if passenger_class is None or embark_point is None:\n return None\n df = pd.DataFrame.from_dict(\n {\n \"Pclass\": [passenger_class + 1],\n \"Sex\": [0 if is_male else 1],\n \"Age\": [age],\n \"Fare\": [fare],\n \"Embarked\": [embark_point + 1],\n \"Company\": [\n (1 if 
\"Sibling\" in company else 0) + (2 if \"Child\" in company else 0)\n ]\n }\n )\n df = encode_age(df)\n df = encode_fare(df)\n pred = clf.predict_proba(df)[0]\n return {\"Perishes\": float(pred[0]), \"Survives\": float(pred[1])}\n\n\ndemo = gr.Interface(\n predict_survival,\n [\n gr.Dropdown([\"first\", \"second\", \"third\"], type=\"index\"),\n \"checkbox\",\n gr.Slider(0, 80, value=25),\n gr.CheckboxGroup([\"Sibling\", \"Child\"], label=\"Travelling with (select all)\"),\n gr.Number(value=20),\n gr.Radio([\"S\", \"C\", \"Q\"], type=\"index\"),\n ],\n \"label\",\n examples=[\n [\"first\", True, 30, [], 50, \"S\"],\n [\"second\", False, 40, [\"Sibling\", \"Child\"], 10, \"Q\"],\n [\"third\", True, 30, [\"Child\"], 20, \"S\"],\n ],\n interpretation=\"default\",\n live=True,\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n"], ["blocks_simple_squares", "import gradio as gr\n\ndemo = gr.Blocks(css=\"\"\"#btn {color: red} .abc {font-family: \"Comic Sans MS\", \"Comic Sans\", cursive !important}\"\"\")\n\nwith demo:\n default_json = {\"a\": \"a\"}\n\n num = gr.State(value=0)\n squared = gr.Number(value=0)\n btn = gr.Button(\"Next Square\", elem_id=\"btn\", elem_classes=[\"abc\", \"def\"])\n\n stats = gr.State(value=default_json)\n table = gr.JSON()\n\n def increase(var, stats_history):\n var += 1\n stats_history[str(var)] = var**2\n return var, var**2, stats_history, stats_history\n\n btn.click(increase, [num, stats], [num, squared, stats, table])\n\nif __name__ == \"__main__\":\n demo.launch()\n"]], "preprocessing": "passes field value as a float or int into the function, depending on `precision`.", "postprocessing": "expects an int or float returned from the function and sets field value to it.", "examples-format": "a float or int representing the number's value.", "parent": "gradio", "prev_obj": "Model3D", "next_obj": "Plot"}, "plot": {"class": null, "name": "Plot", "description": "Used to display various kinds of plots (matplotlib, plotly, or bokeh are supported)
", "tags": {"preprocessing": "this component does *not* accept input.", "postprocessing": "expects either a {matplotlib.figure.Figure}, a {plotly.graph_objects._figure.Figure}, or a {dict} corresponding to a bokeh plot (json_item format)", "demos": "altair_plot, outbreak_forecast, blocks_kinematics, stock_forecast, map_airbnb", "guides": "plot-component-for-maps"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "value", "annotation": "Callable | None | pd.DataFrame", "doc": "Optionally, supply a default plot object to display, must be a matplotlib, plotly, altair, or bokeh figure, or a callable. If callable, the function will be called whenever the app loads to set the initial value of the component.", "default": "None"}, {"name": "label", "annotation": "str | None", "doc": "component name in interface.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", "default": "None"}, {"name": "show_label", "annotation": "bool | None", "doc": "if True, will display label.", "default": "None"}, {"name": "container", "annotation": "bool", "doc": "If True, will place the component in a container - providing some extra padding around the border.", "default": "True"}, {"name": "scale", "annotation": "int | None", "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", "default": "None"}, {"name": "min_width", "annotation": "int", "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", "default": "160"}, {"name": "visible", "annotation": "bool", "doc": "If False, component will be hidden.", "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "elem_classes", "annotation": "list[str] | str | None", "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}], "returns": {"annotation": null}, "example": null, "fns": [{"fn": null, "name": "change", "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. 
If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Plot"}, {"fn": null, "name": "clear", "description": "This listener is triggered when the user clears the component (e.g. image or audio) using the X button for the component. 
This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. 
Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Plot"}], "string_shortcuts": [["Plot", "plot", "Uses default values"]], "demos": [["altair_plot", "import altair as alt\nimport gradio as gr\nimport numpy as np\nimport pandas as pd\nfrom vega_datasets import data\n\n\ndef make_plot(plot_type):\n if plot_type == \"scatter_plot\":\n cars = data.cars()\n return alt.Chart(cars).mark_point().encode(\n x='Horsepower',\n y='Miles_per_Gallon',\n color='Origin',\n )\n elif plot_type == \"heatmap\":\n # Compute x^2 + y^2 across a 2D grid\n x, y = np.meshgrid(range(-5, 5), range(-5, 5))\n z = x ** 2 + y ** 2\n\n # Convert this grid to columnar data expected by Altair\n source = pd.DataFrame({'x': x.ravel(),\n 'y': y.ravel(),\n 'z': z.ravel()})\n return alt.Chart(source).mark_rect().encode(\n x='x:O',\n y='y:O',\n color='z:Q'\n )\n elif plot_type == \"us_map\":\n states = alt.topo_feature(data.us_10m.url, 'states')\n source = data.income.url\n\n return alt.Chart(source).mark_geoshape().encode(\n shape='geo:G',\n color='pct:Q',\n tooltip=['name:N', 'pct:Q'],\n facet=alt.Facet('group:N', columns=2),\n ).transform_lookup(\n lookup='id',\n from_=alt.LookupData(data=states, key='id'),\n as_='geo'\n ).properties(\n width=300,\n height=175,\n ).project(\n type='albersUsa'\n )\n elif plot_type == \"interactive_barplot\":\n source = data.movies.url\n\n pts = alt.selection(type=\"single\", encodings=['x'])\n\n rect = alt.Chart(data.movies.url).mark_rect().encode(\n alt.X('IMDB_Rating:Q', bin=True),\n alt.Y('Rotten_Tomatoes_Rating:Q', bin=True),\n alt.Color('count()',\n scale=alt.Scale(scheme='greenblue'),\n legend=alt.Legend(title='Total Records')\n )\n )\n\n circ = rect.mark_point().encode(\n alt.ColorValue('grey'),\n alt.Size('count()',\n legend=alt.Legend(title='Records in Selection')\n )\n ).transform_filter(\n pts\n )\n\n bar = alt.Chart(source).mark_bar().encode(\n x='Major_Genre:N',\n y='count()',\n color=alt.condition(pts, alt.ColorValue(\"steelblue\"), alt.ColorValue(\"grey\"))\n ).properties(\n width=550,\n height=200\n ).add_selection(pts)\n\n plot = alt.vconcat(\n rect + circ,\n bar\n ).resolve_legend(\n color=\"independent\",\n size=\"independent\"\n )\n return plot\n elif plot_type == \"radial\":\n source = pd.DataFrame({\"values\": [12, 23, 47, 6, 52, 19]})\n\n base = alt.Chart(source).encode(\n theta=alt.Theta(\"values:Q\", stack=True),\n radius=alt.Radius(\"values\", scale=alt.Scale(type=\"sqrt\", zero=True, rangeMin=20)),\n color=\"values:N\",\n )\n\n c1 = base.mark_arc(innerRadius=20, stroke=\"#fff\")\n\n c2 = base.mark_text(radiusOffset=10).encode(text=\"values:Q\")\n\n return c1 + c2\n elif plot_type == \"multiline\":\n source = data.stocks()\n\n highlight = alt.selection(type='single', on='mouseover',\n fields=['symbol'], nearest=True)\n\n base = alt.Chart(source).encode(\n x='date:T',\n y='price:Q',\n color='symbol:N'\n )\n\n points = base.mark_circle().encode(\n opacity=alt.value(0)\n ).add_selection(\n highlight\n ).properties(\n width=600\n )\n\n lines = base.mark_line().encode(\n size=alt.condition(~highlight, alt.value(1), alt.value(3))\n )\n\n return points + lines\n\n\nwith 
gr.Blocks() as demo:\n button = gr.Radio(label=\"Plot type\",\n choices=['scatter_plot', 'heatmap', 'us_map',\n 'interactive_barplot', \"radial\", \"multiline\"], value='scatter_plot')\n plot = gr.Plot(label=\"Plot\")\n button.change(make_plot, inputs=button, outputs=[plot])\n demo.load(make_plot, inputs=[button], outputs=[plot])\n\n\nif __name__ == \"__main__\":\n demo.launch()\n"], ["outbreak_forecast", "import altair\n\nimport gradio as gr\nfrom math import sqrt\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport plotly.express as px\nimport pandas as pd\n\n\ndef outbreak(plot_type, r, month, countries, social_distancing):\n months = [\"January\", \"February\", \"March\", \"April\", \"May\"]\n m = months.index(month)\n start_day = 30 * m\n final_day = 30 * (m + 1)\n x = np.arange(start_day, final_day + 1)\n pop_count = {\"USA\": 350, \"Canada\": 40, \"Mexico\": 300, \"UK\": 120}\n if social_distancing:\n r = sqrt(r)\n df = pd.DataFrame({\"day\": x})\n for country in countries:\n df[country] = x ** (r) * (pop_count[country] + 1)\n\n if plot_type == \"Matplotlib\":\n fig = plt.figure()\n plt.plot(df[\"day\"], df[countries].to_numpy())\n plt.title(\"Outbreak in \" + month)\n plt.ylabel(\"Cases\")\n plt.xlabel(\"Days since Day 0\")\n plt.legend(countries)\n return fig\n elif plot_type == \"Plotly\":\n fig = px.line(df, x=\"day\", y=countries)\n fig.update_layout(\n title=\"Outbreak in \" + month,\n xaxis_title=\"Cases\",\n yaxis_title=\"Days Since Day 0\",\n )\n return fig\n elif plot_type == \"Altair\":\n df = df.melt(id_vars=\"day\").rename(columns={\"variable\": \"country\"})\n fig = altair.Chart(df).mark_line().encode(x=\"day\", y='value', color='country')\n return fig\n else:\n raise ValueError(\"A plot type must be selected\")\n\n\ninputs = [\n gr.Dropdown([\"Matplotlib\", \"Plotly\", \"Altair\"], label=\"Plot Type\"),\n gr.Slider(1, 4, 3.2, label=\"R\"),\n gr.Dropdown([\"January\", \"February\", \"March\", \"April\", \"May\"], label=\"Month\"),\n gr.CheckboxGroup(\n [\"USA\", \"Canada\", \"Mexico\", \"UK\"], label=\"Countries\", value=[\"USA\", \"Canada\"]\n ),\n gr.Checkbox(label=\"Social Distancing?\"),\n]\noutputs = gr.Plot()\n\ndemo = gr.Interface(\n fn=outbreak,\n inputs=inputs,\n outputs=outputs,\n examples=[\n [\"Matplotlib\", 2, \"March\", [\"Mexico\", \"UK\"], True],\n [\"Altair\", 2, \"March\", [\"Mexico\", \"Canada\"], True],\n [\"Plotly\", 3.6, \"February\", [\"Canada\", \"Mexico\", \"UK\"], False],\n ],\n cache_examples=True,\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n\n\n\n"], ["blocks_kinematics", "import pandas as pd\nimport numpy as np\n\nimport gradio as gr\n\n\ndef plot(v, a):\n g = 9.81\n theta = a / 180 * 3.14\n tmax = ((2 * v) * np.sin(theta)) / g\n timemat = tmax * np.linspace(0, 1, 40)\n\n x = (v * timemat) * np.cos(theta)\n y = ((v * timemat) * np.sin(theta)) - ((0.5 * g) * (timemat**2))\n df = pd.DataFrame({\"x\": x, \"y\": y})\n return df\n\n\ndemo = gr.Blocks()\n\nwith demo:\n gr.Markdown(\n r\"Let's do some kinematics! Choose the speed and angle to see the trajectory. 
Remember that the range $R = v_0^2 \\cdot \\frac{\\sin(2\\theta)}{g}$\"\n )\n\n with gr.Row():\n speed = gr.Slider(1, 30, 25, label=\"Speed\")\n angle = gr.Slider(0, 90, 45, label=\"Angle\")\n output = gr.LinePlot(\n x=\"x\",\n y=\"y\",\n overlay_point=True,\n tooltip=[\"x\", \"y\"],\n x_lim=[0, 100],\n y_lim=[0, 60],\n width=350,\n height=300,\n )\n btn = gr.Button(value=\"Run\")\n btn.click(plot, [speed, angle], output)\n\nif __name__ == \"__main__\":\n demo.launch()\n"], ["stock_forecast", "import matplotlib.pyplot as plt\nimport numpy as np\n\nimport gradio as gr\n\n\ndef plot_forecast(final_year, companies, noise, show_legend, point_style):\n start_year = 2020\n x = np.arange(start_year, final_year + 1)\n year_count = x.shape[0]\n plt_format = ({\"cross\": \"X\", \"line\": \"-\", \"circle\": \"o--\"})[point_style]\n fig = plt.figure()\n ax = fig.add_subplot(111)\n for i, company in enumerate(companies):\n series = np.arange(0, year_count, dtype=float)\n series = series**2 * (i + 1)\n series += np.random.rand(year_count) * noise\n ax.plot(x, series, plt_format)\n if show_legend:\n plt.legend(companies)\n return fig\n\n\ndemo = gr.Interface(\n plot_forecast,\n [\n gr.Radio([2025, 2030, 2035, 2040], label=\"Project to:\"),\n gr.CheckboxGroup([\"Google\", \"Microsoft\", \"Gradio\"], label=\"Company Selection\"),\n gr.Slider(1, 100, label=\"Noise Level\"),\n gr.Checkbox(label=\"Show Legend\"),\n gr.Dropdown([\"cross\", \"line\", \"circle\"], label=\"Style\"),\n ],\n gr.Plot(label=\"forecast\"),\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n"], ["map_airbnb", "import gradio as gr\nimport plotly.graph_objects as go\nfrom datasets import load_dataset\n\ndataset = load_dataset(\"gradio/NYC-Airbnb-Open-Data\", split=\"train\")\ndf = dataset.to_pandas()\n\ndef filter_map(min_price, max_price, boroughs):\n\n filtered_df = df[(df['neighbourhood_group'].isin(boroughs)) & \n (df['price'] > min_price) & (df['price'] < max_price)]\n names = filtered_df[\"name\"].tolist()\n prices = filtered_df[\"price\"].tolist()\n text_list = [(names[i], prices[i]) for i in range(0, len(names))]\n fig = go.Figure(go.Scattermapbox(\n customdata=text_list,\n lat=filtered_df['latitude'].tolist(),\n lon=filtered_df['longitude'].tolist(),\n mode='markers',\n marker=go.scattermapbox.Marker(\n size=6\n ),\n hoverinfo=\"text\",\n hovertemplate='Name: %{customdata[0]}
Price: $%{customdata[1]}'\n ))\n\n fig.update_layout(\n mapbox_style=\"open-street-map\",\n hovermode='closest',\n mapbox=dict(\n bearing=0,\n center=go.layout.mapbox.Center(\n lat=40.67,\n lon=-73.90\n ),\n pitch=0,\n zoom=9\n ),\n )\n\n return fig\n\nwith gr.Blocks() as demo:\n with gr.Column():\n with gr.Row():\n min_price = gr.Number(value=250, label=\"Minimum Price\")\n max_price = gr.Number(value=1000, label=\"Maximum Price\")\n boroughs = gr.CheckboxGroup(choices=[\"Queens\", \"Brooklyn\", \"Manhattan\", \"Bronx\", \"Staten Island\"], value=[\"Queens\", \"Brooklyn\"], label=\"Select Boroughs:\")\n btn = gr.Button(value=\"Update Filter\")\n map = gr.Plot().style()\n demo.load(filter_map, [min_price, max_price, boroughs], map)\n btn.click(filter_map, [min_price, max_price, boroughs], map)\n\nif __name__ == \"__main__\":\n demo.launch()"]], "guides": [{"name": "plot-component-for-maps", "category": "tabular-data-science-and-plots", "pretty_category": "Tabular Data Science And Plots", "guide_index": null, "absolute_index": 27, "pretty_name": "Plot Component For Maps", "content": "# How to Use the Plot Component for Maps\n\n\n\n## Introduction\n\nThis guide explains how you can use Gradio to plot geographical data on a map using the `gradio.Plot` component. The Gradio `Plot` component works with Matplotlib, Bokeh and Plotly. Plotly is what we will be working with in this guide. Plotly allows developers to easily create all sorts of maps with their geographical data. Take a look [here](https://plotly.com/python/maps/) for some examples.\n\n## Overview\n\nWe will be using the New York City Airbnb dataset, which is hosted on kaggle [here](https://www.kaggle.com/datasets/dgomonov/new-york-city-airbnb-open-data). I've uploaded it to the Hugging Face Hub as a dataset [here](https://huggingface.co/datasets/gradio/NYC-Airbnb-Open-Data) for easier use and download. Using this data we will plot Airbnb locations on a map output and allow filtering based on price and location. Below is the demo that we will be building. \u26a1\ufe0f\n\n\n\n## Step 1 - Loading CSV data \ud83d\udcbe\n\nLet's start by loading the Airbnb NYC data from the Hugging Face Hub.\n\n```python\nfrom datasets import load_dataset\n\ndataset = load_dataset(\"gradio/NYC-Airbnb-Open-Data\", split=\"train\")\ndf = dataset.to_pandas()\n\ndef filter_map(min_price, max_price, boroughs):\n new_df = df[(df['neighbourhood_group'].isin(boroughs)) & \n (df['price'] > min_price) & (df['price'] < max_price)]\n names = new_df[\"name\"].tolist()\n prices = new_df[\"price\"].tolist()\n text_list = [(names[i], prices[i]) for i in range(0, len(names))]\n```\n\nIn the code above, we first load the csv data into a pandas dataframe. Let's begin by defining a function that we will use as the prediction function for the gradio app. This function will accept the minimum price and maximum price range as well as the list of boroughs to filter the resulting map. We can use the passed in values (`min_price`, `max_price`, and list of `boroughs`) to filter the dataframe and create `new_df`. Next we will create `text_list` of the names and prices of each Airbnb to use as labels on the map.\n\n## Step 2 - Map Figure \ud83c\udf10\n\nPlotly makes it easy to work with maps. 
Let's take a look below how we can create a map figure.\n\n```python\nimport plotly.graph_objects as go\n\nfig = go.Figure(go.Scattermapbox(\n customdata=text_list,\n lat=new_df['latitude'].tolist(),\n lon=new_df['longitude'].tolist(),\n mode='markers',\n marker=go.scattermapbox.Marker(\n size=6\n ),\n hoverinfo=\"text\",\n hovertemplate='Name: %{customdata[0]}
Price: $%{customdata[1]}'\n ))\n\nfig.update_layout(\n mapbox_style=\"open-street-map\",\n hovermode='closest',\n mapbox=dict(\n bearing=0,\n center=go.layout.mapbox.Center(\n lat=40.67,\n lon=-73.90\n ),\n pitch=0,\n zoom=9\n ),\n)\n```\n\nAbove, we create a scatter plot on mapbox by passing it our list of latitudes and longitudes to plot markers. We also pass in our custom data of names and prices for additional info to appear on every marker we hover over. Next we use `update_layout` to specify other map settings such as zoom, and centering.\n\nMore info [here](https://plotly.com/python/scattermapbox/) on scatter plots using Mapbox and Plotly.\n\n## Step 3 - Gradio App \u26a1\ufe0f\n\nWe will use two `gr.Number` components and a `gr.CheckboxGroup` to allow users of our app to specify price ranges and borough locations. We will then use the `gr.Plot` component as an output for our Plotly + Mapbox map we created earlier.\n\n```python\nwith gr.Blocks() as demo:\n with gr.Column():\n with gr.Row():\n min_price = gr.Number(value=250, label=\"Minimum Price\")\n max_price = gr.Number(value=1000, label=\"Maximum Price\")\n boroughs = gr.CheckboxGroup(choices=[\"Queens\", \"Brooklyn\", \"Manhattan\", \"Bronx\", \"Staten Island\"], value=[\"Queens\", \"Brooklyn\"], label=\"Select Boroughs:\")\n btn = gr.Button(value=\"Update Filter\")\n map = gr.Plot()\n demo.load(filter_map, [min_price, max_price, boroughs], map)\n btn.click(filter_map, [min_price, max_price, boroughs], map)\n```\n\nWe layout these components using the `gr.Column` and `gr.Row` and we'll also add event triggers for when the demo first loads and when our \"Update Filter\" button is clicked in order to trigger the map to update with our new filters.\n\nThis is what the full demo code looks like:\n\n```python\nimport gradio as gr\nimport plotly.graph_objects as go\nfrom datasets import load_dataset\n\ndataset = load_dataset(\"gradio/NYC-Airbnb-Open-Data\", split=\"train\")\ndf = dataset.to_pandas()\n\ndef filter_map(min_price, max_price, boroughs):\n\n filtered_df = df[(df['neighbourhood_group'].isin(boroughs)) & \n (df['price'] > min_price) & (df['price'] < max_price)]\n names = filtered_df[\"name\"].tolist()\n prices = filtered_df[\"price\"].tolist()\n text_list = [(names[i], prices[i]) for i in range(0, len(names))]\n fig = go.Figure(go.Scattermapbox(\n customdata=text_list,\n lat=filtered_df['latitude'].tolist(),\n lon=filtered_df['longitude'].tolist(),\n mode='markers',\n marker=go.scattermapbox.Marker(\n size=6\n ),\n hoverinfo=\"text\",\n hovertemplate='Name: %{customdata[0]}
Price: $%{customdata[1]}'\n ))\n\n fig.update_layout(\n mapbox_style=\"open-street-map\",\n hovermode='closest',\n mapbox=dict(\n bearing=0,\n center=go.layout.mapbox.Center(\n lat=40.67,\n lon=-73.90\n ),\n pitch=0,\n zoom=9\n ),\n )\n\n return fig\n\nwith gr.Blocks() as demo:\n with gr.Column():\n with gr.Row():\n min_price = gr.Number(value=250, label=\"Minimum Price\")\n max_price = gr.Number(value=1000, label=\"Maximum Price\")\n boroughs = gr.CheckboxGroup(choices=[\"Queens\", \"Brooklyn\", \"Manhattan\", \"Bronx\", \"Staten Island\"], value=[\"Queens\", \"Brooklyn\"], label=\"Select Boroughs:\")\n btn = gr.Button(value=\"Update Filter\")\n map = gr.Plot().style()\n demo.load(filter_map, [min_price, max_price, boroughs], map)\n btn.click(filter_map, [min_price, max_price, boroughs], map)\n\ndemo.launch()\n```\n\n## Step 4 - Deployment \ud83e\udd17\n\nIf you run the code above, your app will start running locally.\nYou can even get a temporary shareable link by passing the `share=True` parameter to `launch`.\n\nBut what if you want to a permanent deployment solution?\nLet's deploy our Gradio app to the free HuggingFace Spaces platform.\n\nIf you haven't used Spaces before, follow the previous guide [here](/using_hugging_face_integrations).\n\n## Conclusion \ud83c\udf89\n\nAnd you're all done! That's all the code you need to build a map demo.\n\nHere's a link to the demo [Map demo](https://huggingface.co/spaces/gradio/map_airbnb) and [complete code](https://huggingface.co/spaces/gradio/map_airbnb/blob/main/run.py) (on Hugging Face Spaces)\n", "html": "

How to Use the Plot Component for Maps (HTML rendering of the guide; mirrors the markdown "content" field above)
\n", "tags": ["PLOTS", "MAPS"], "spaces": [], "url": "/guides/plot-component-for-maps/", "contributor": null}], "preprocessing": "this component does *not* accept input.", "postprocessing": "expects either a matplotlib.figure.Figure, a plotly.graph_objects._figure.Figure, or a dict corresponding to a bokeh plot (json_item format)", "parent": "gradio", "prev_obj": "Number", "next_obj": "Radio"}, "radio": {"class": null, "name": "Radio", "description": "Creates a set of radio buttons of which only one can be selected.
", "tags": {"preprocessing": "passes the value of the selected radio button as a {str} or its index as an {int} into the function, depending on `type`.", "postprocessing": "expects a {str} corresponding to the value of the radio button to be selected.", "examples-format": "a {str} representing the radio option to select.", "demos": "sentence_builder, titanic_survival, blocks_essay"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "choices", "annotation": "list[str] | None", "doc": "list of options to select from.", "default": "None"}, {"name": "value", "annotation": "str | Callable | None", "doc": "the button selected by default. If None, no button is selected by default. If callable, the function will be called whenever the app loads to set the initial value of the component.", "default": "None"}, {"name": "type", "annotation": "str", "doc": "Type of value to be returned by component. \"value\" returns the string of the choice selected, \"index\" returns the index of the choice selected.", "default": "\"value\""}, {"name": "label", "annotation": "str | None", "doc": "component name in interface.", "default": "None"}, {"name": "info", "annotation": "str | None", "doc": "additional component description.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", "default": "None"}, {"name": "show_label", "annotation": "bool | None", "doc": "if True, will display label.", "default": "None"}, {"name": "container", "annotation": "bool", "doc": "If True, will place the component in a container - providing some extra padding around the border.", "default": "True"}, {"name": "scale", "annotation": "int | None", "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", "default": "None"}, {"name": "min_width", "annotation": "int", "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", "default": "160"}, {"name": "interactive", "annotation": "bool | None", "doc": "if True, choices in this radio group will be selectable; if False, selection will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.", "default": "None"}, {"name": "visible", "annotation": "bool", "doc": "If False, component will be hidden.", "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "elem_classes", "annotation": "list[str] | str | None", "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}], "returns": {"annotation": null}, "example": null, "fns": [{"fn": null, "name": "change", "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. 
an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. 
Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Radio"}, {"fn": null, "name": "input", "description": "This listener is triggered when the user changes the value of the component. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Radio"}, {"fn": null, "name": "select", "description": "Event listener for when the user selects Radio option. Uses event data gradio.SelectData to carry `value` referring to label of selected option, and `index` to refer to index. See EventData documentation on how to use this event data.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. 
The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Radio"}], "string_shortcuts": [["Radio", "radio", "Uses default values"]], "demos": [["sentence_builder", "import gradio as gr\n\n\ndef sentence_builder(quantity, animal, countries, place, activity_list, morning):\n return f\"\"\"The {quantity} {animal}s from {\" and \".join(countries)} went to the {place} where they {\" and \".join(activity_list)} until the {\"morning\" if morning else \"night\"}\"\"\"\n\n\ndemo = gr.Interface(\n sentence_builder,\n [\n gr.Slider(2, 20, value=4, label=\"Count\", info=\"Choose between 2 and 20\"),\n gr.Dropdown(\n [\"cat\", \"dog\", \"bird\"], label=\"Animal\", info=\"Will add more animals later!\"\n ),\n gr.CheckboxGroup([\"USA\", \"Japan\", \"Pakistan\"], label=\"Countries\", info=\"Where are they from?\"),\n gr.Radio([\"park\", \"zoo\", \"road\"], label=\"Location\", info=\"Where did they go?\"),\n gr.Dropdown(\n [\"ran\", \"swam\", \"ate\", \"slept\"], value=[\"swam\", \"slept\"], multiselect=True, label=\"Activity\", info=\"Lorem ipsum dolor sit amet, consectetur adipiscing elit. 
Sed auctor, nisl eget ultricies aliquam, nunc nisl aliquet nunc, eget aliquam nisl nunc vel nisl.\"\n ),\n gr.Checkbox(label=\"Morning\", info=\"Did they do it in the morning?\"),\n ],\n \"text\",\n examples=[\n [2, \"cat\", [\"Japan\", \"Pakistan\"], \"park\", [\"ate\", \"swam\"], True],\n [4, \"dog\", [\"Japan\"], \"zoo\", [\"ate\", \"swam\"], False],\n [10, \"bird\", [\"USA\", \"Pakistan\"], \"road\", [\"ran\"], False],\n [8, \"cat\", [\"Pakistan\"], \"zoo\", [\"ate\"], True],\n ]\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n"], ["titanic_survival", "import os\n\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\n\nimport gradio as gr\n\ncurrent_dir = os.path.dirname(os.path.realpath(__file__))\ndata = pd.read_csv(os.path.join(current_dir, \"files/titanic.csv\"))\n\n\ndef encode_age(df):\n df.Age = df.Age.fillna(-0.5)\n bins = (-1, 0, 5, 12, 18, 25, 35, 60, 120)\n categories = pd.cut(df.Age, bins, labels=False)\n df.Age = categories\n return df\n\n\ndef encode_fare(df):\n df.Fare = df.Fare.fillna(-0.5)\n bins = (-1, 0, 8, 15, 31, 1000)\n categories = pd.cut(df.Fare, bins, labels=False)\n df.Fare = categories\n return df\n\n\ndef encode_df(df):\n df = encode_age(df)\n df = encode_fare(df)\n sex_mapping = {\"male\": 0, \"female\": 1}\n df = df.replace({\"Sex\": sex_mapping})\n embark_mapping = {\"S\": 1, \"C\": 2, \"Q\": 3}\n df = df.replace({\"Embarked\": embark_mapping})\n df.Embarked = df.Embarked.fillna(0)\n df[\"Company\"] = 0\n df.loc[(df[\"SibSp\"] > 0), \"Company\"] = 1\n df.loc[(df[\"Parch\"] > 0), \"Company\"] = 2\n df.loc[(df[\"SibSp\"] > 0) & (df[\"Parch\"] > 0), \"Company\"] = 3\n df = df[\n [\n \"PassengerId\",\n \"Pclass\",\n \"Sex\",\n \"Age\",\n \"Fare\",\n \"Embarked\",\n \"Company\",\n \"Survived\",\n ]\n ]\n return df\n\n\ntrain = encode_df(data)\n\nX_all = train.drop([\"Survived\", \"PassengerId\"], axis=1)\ny_all = train[\"Survived\"]\n\nnum_test = 0.20\nX_train, X_test, y_train, y_test = train_test_split(\n X_all, y_all, test_size=num_test, random_state=23\n)\n\nclf = RandomForestClassifier()\nclf.fit(X_train, y_train)\npredictions = clf.predict(X_test)\n\n\ndef predict_survival(passenger_class, is_male, age, company, fare, embark_point):\n if passenger_class is None or embark_point is None:\n return None\n df = pd.DataFrame.from_dict(\n {\n \"Pclass\": [passenger_class + 1],\n \"Sex\": [0 if is_male else 1],\n \"Age\": [age],\n \"Fare\": [fare],\n \"Embarked\": [embark_point + 1],\n \"Company\": [\n (1 if \"Sibling\" in company else 0) + (2 if \"Child\" in company else 0)\n ]\n }\n )\n df = encode_age(df)\n df = encode_fare(df)\n pred = clf.predict_proba(df)[0]\n return {\"Perishes\": float(pred[0]), \"Survives\": float(pred[1])}\n\n\ndemo = gr.Interface(\n predict_survival,\n [\n gr.Dropdown([\"first\", \"second\", \"third\"], type=\"index\"),\n \"checkbox\",\n gr.Slider(0, 80, value=25),\n gr.CheckboxGroup([\"Sibling\", \"Child\"], label=\"Travelling with (select all)\"),\n gr.Number(value=20),\n gr.Radio([\"S\", \"C\", \"Q\"], type=\"index\"),\n ],\n \"label\",\n examples=[\n [\"first\", True, 30, [], 50, \"S\"],\n [\"second\", False, 40, [\"Sibling\", \"Child\"], 10, \"Q\"],\n [\"third\", True, 30, [\"Child\"], 20, \"S\"],\n ],\n interpretation=\"default\",\n live=True,\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n"], ["blocks_essay", "import gradio as gr\n\n\ndef change_textbox(choice):\n if choice == \"short\":\n return gr.Textbox.update(lines=2, visible=True)\n elif 
choice == \"long\":\n return gr.Textbox.update(lines=8, visible=True)\n else:\n return gr.Textbox.update(visible=False)\n\n\nwith gr.Blocks() as demo:\n radio = gr.Radio(\n [\"short\", \"long\", \"none\"], label=\"What kind of essay would you like to write?\"\n )\n text = gr.Textbox(lines=2, interactive=True).style(show_copy_button=True)\n\n radio.change(fn=change_textbox, inputs=radio, outputs=text)\n\n\nif __name__ == \"__main__\":\n demo.launch()\n"]], "preprocessing": "passes the value of the selected radio button as a str or its index as an int into the function, depending on `type`.", "postprocessing": "expects a str corresponding to the value of the radio button to be selected.", "examples-format": "a str representing the radio option to select.", "parent": "gradio", "prev_obj": "Plot", "next_obj": "ScatterPlot"}, "scatterplot": {"class": null, "name": "ScatterPlot", "description": "Create a scatter plot.

", "tags": {"preprocessing": "this component does *not* accept input.", "postprocessing": "expects a pandas dataframe with the data to plot.", "demos": "scatter_plot", "guides": "creating-a-dashboard-from-bigquery-data"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "value", "annotation": "pd.DataFrame | Callable | None", "doc": "The pandas dataframe containing the data to display in a scatter plot, or a callable. If callable, the function will be called whenever the app loads to set the initial value of the component.", "default": "None"}, {"name": "x", "annotation": "str | None", "doc": "Column corresponding to the x axis.", "default": "None"}, {"name": "y", "annotation": "str | None", "doc": "Column corresponding to the y axis.", "default": "None"}, {"name": "color", "annotation": "str | None", "doc": "The column to determine the point color. If the column contains numeric data, gradio will interpolate the column data so that small values correspond to light colors and large values correspond to dark values.", "default": "None"}, {"name": "size", "annotation": "str | None", "doc": "The column used to determine the point size. Should contain numeric data so that gradio can map the data to the point size.", "default": "None"}, {"name": "shape", "annotation": "str | None", "doc": "The column used to determine the point shape. Should contain categorical data. Gradio will map each unique value to a different shape.", "default": "None"}, {"name": "title", "annotation": "str | None", "doc": "The title to display on top of the chart.", "default": "None"}, {"name": "tooltip", "annotation": "list[str] | str | None", "doc": "The column (or list of columns) to display on the tooltip when a user hovers a point on the plot.", "default": "None"}, {"name": "x_title", "annotation": "str | None", "doc": "The title given to the x axis. By default, uses the value of the x parameter.", "default": "None"}, {"name": "y_title", "annotation": "str | None", "doc": "The title given to the y axis. By default, uses the value of the y parameter.", "default": "None"}, {"name": "color_legend_title", "annotation": "str | None", "doc": "The title given to the color legend. By default, uses the value of color parameter.", "default": "None"}, {"name": "size_legend_title", "annotation": "str | None", "doc": "The title given to the size legend. By default, uses the value of the size parameter.", "default": "None"}, {"name": "shape_legend_title", "annotation": "str | None", "doc": "The title given to the shape legend. By default, uses the value of the shape parameter.", "default": "None"}, {"name": "color_legend_position", "annotation": "Literal['left', 'right', 'top', 'bottom', 'top-left', 'top-right', 'bottom-left', 'bottom-right', 'none'] | None", "doc": "The position of the color legend. If the string value 'none' is passed, this legend is omitted. For other valid position values see: https://vega.github.io/vega/docs/legends/#orientation.", "default": "None"}, {"name": "size_legend_position", "annotation": "Literal['left', 'right', 'top', 'bottom', 'top-left', 'top-right', 'bottom-left', 'bottom-right', 'none'] | None", "doc": "The position of the size legend. If the string value 'none' is passed, this legend is omitted. 
For other valid position values see: https://vega.github.io/vega/docs/legends/#orientation.", "default": "None"}, {"name": "shape_legend_position", "annotation": "Literal['left', 'right', 'top', 'bottom', 'top-left', 'top-right', 'bottom-left', 'bottom-right', 'none'] | None", "doc": "The position of the shape legend. If the string value 'none' is passed, this legend is omitted. For other valid position values see: https://vega.github.io/vega/docs/legends/#orientation.", "default": "None"}, {"name": "height", "annotation": "int | None", "doc": "The height of the plot in pixels.", "default": "None"}, {"name": "width", "annotation": "int | None", "doc": "The width of the plot in pixels.", "default": "None"}, {"name": "x_lim", "annotation": "list[int | float] | None", "doc": "A tuple or list containing the limits for the x-axis, specified as [x_min, x_max].", "default": "None"}, {"name": "y_lim", "annotation": "list[int | float] | None", "doc": "A tuple of list containing the limits for the y-axis, specified as [y_min, y_max].", "default": "None"}, {"name": "caption", "annotation": "str | None", "doc": "The (optional) caption to display below the plot.", "default": "None"}, {"name": "interactive", "annotation": "bool | None", "doc": "Whether users should be able to interact with the plot by panning or zooming with their mouse or trackpad.", "default": "True"}, {"name": "label", "annotation": "str | None", "doc": "The (optional) label to display on the top left corner of the plot.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": " If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", "default": "None"}, {"name": "show_label", "annotation": "bool | None", "doc": "Whether the label should be displayed.", "default": "None"}, {"name": "container", "annotation": "bool", "doc": null, "default": "True"}, {"name": "scale", "annotation": "int | None", "doc": null, "default": "None"}, {"name": "min_width", "annotation": "int", "doc": null, "default": "160"}, {"name": "visible", "annotation": "bool", "doc": "Whether the plot should be visible.", "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "elem_classes", "annotation": "list[str] | str | None", "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}], "returns": {"annotation": null}, "example": null, "fns": [{"fn": null, "name": "change", "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. 
Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.ScatterPlot"}, {"fn": null, "name": "clear", "description": "This listener is triggered when the user clears the component (e.g. image or audio) using the X button for the component. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.ScatterPlot"}], "string_shortcuts": [["ScatterPlot", "scatterplot", "Uses default values"]], "demos": [["scatter_plot", "import gradio as gr\nfrom vega_datasets import data\n\ncars = data.cars()\niris = data.iris()\n\n# # Or generate your own fake data\n\n# import pandas as pd\n# import random\n\n# cars_data = {\n# \"Name\": [\"car name \" + f\" {int(i/10)}\" for i in range(400)],\n# \"Miles_per_Gallon\": [random.randint(10, 30) for _ in range(400)],\n# \"Origin\": [random.choice([\"USA\", \"Europe\", \"Japan\"]) for _ in range(400)],\n# \"Horsepower\": [random.randint(50, 250) for _ in range(400)],\n# }\n\n# iris_data = {\n# \"petalWidth\": [round(random.uniform(0, 2.5), 2) for _ in range(150)],\n# \"petalLength\": [round(random.uniform(0, 7), 2) for _ in range(150)],\n# \"species\": [\n# random.choice([\"setosa\", \"versicolor\", \"virginica\"]) for _ in range(150)\n# ],\n# }\n\n# cars = pd.DataFrame(cars_data)\n# iris = pd.DataFrame(iris_data)\n\n\ndef scatter_plot_fn(dataset):\n if dataset == \"iris\":\n return gr.ScatterPlot.update(\n value=iris,\n x=\"petalWidth\",\n y=\"petalLength\",\n color=\"species\",\n title=\"Iris Dataset\",\n color_legend_title=\"Species\",\n x_title=\"Petal Width\",\n y_title=\"Petal Length\",\n tooltip=[\"petalWidth\", \"petalLength\", \"species\"],\n caption=\"\",\n )\n else:\n return gr.ScatterPlot.update(\n value=cars,\n x=\"Horsepower\",\n y=\"Miles_per_Gallon\",\n color=\"Origin\",\n tooltip=\"Name\",\n title=\"Car Data\",\n y_title=\"Miles per Gallon\",\n color_legend_title=\"Origin of Car\",\n caption=\"MPG vs Horsepower of various cars\",\n )\n\n\nwith gr.Blocks() as scatter_plot:\n with gr.Row():\n with gr.Column():\n dataset = gr.Dropdown(choices=[\"cars\", \"iris\"], value=\"cars\")\n with gr.Column():\n plot = gr.ScatterPlot()\n dataset.change(scatter_plot_fn, inputs=dataset, outputs=plot)\n scatter_plot.load(fn=scatter_plot_fn, inputs=dataset, outputs=plot)\n\nif __name__ == \"__main__\":\n scatter_plot.launch()\n"]], "guides": [{"name": "creating-a-dashboard-from-bigquery-data", "category": "tabular-data-science-and-plots", "pretty_category": "Tabular Data Science And Plots", "guide_index": null, "absolute_index": 24, "pretty_name": "Creating A Dashboard From Bigquery Data", "content": "# Creating a Real-Time Dashboard from BigQuery Data\n\n\n\n\n[Google BigQuery](https://cloud.google.com/bigquery) is a cloud-based service for processing very large data sets. 
It is a serverless and highly scalable data warehousing solution that enables users to analyze data [using SQL-like queries](https://www.oreilly.com/library/view/google-bigquery-the/9781492044451/ch01.html).\n\nIn this tutorial, we will show you how to query a BigQuery dataset in Python and display the data in a dashboard that updates in real time using `gradio`. The dashboard will look like this:\n\n\n\nWe'll cover the following steps in this Guide:\n\n1. Setting up your BigQuery credentials\n2. Using the BigQuery client\n3. Building the real-time dashboard (in just *7 lines of Python*)\n\nWe'll be working with the [New York Times' COVID dataset](https://www.nytimes.com/interactive/2021/us/covid-cases.html) that is available as a public dataset on BigQuery. The dataset, named `covid19_nyt.us_counties` contains the latest information about the number of confirmed cases and deaths from COVID across US counties. \n\n**Prerequisites**: This Guide uses [Gradio Blocks](/guides/quickstart/#blocks-more-flexibility-and-control), so make your are familiar with the Blocks class. \n\n## Setting up your BigQuery Credentials\n\nTo use Gradio with BigQuery, you will need to obtain your BigQuery credentials and use them with the [BigQuery Python client](https://pypi.org/project/google-cloud-bigquery/). If you already have BigQuery credentials (as a `.json` file), you can skip this section. If not, you can do this for free in just a couple of minutes.\n\n1. First, log in to your Google Cloud account and go to the Google Cloud Console (https://console.cloud.google.com/)\n\n2. In the Cloud Console, click on the hamburger menu in the top-left corner and select \"APIs & Services\" from the menu. If you do not have an existing project, you will need to create one.\n\n3. Then, click the \"+ Enabled APIs & services\" button, which allows you to enable specific services for your project. Search for \"BigQuery API\", click on it, and click the \"Enable\" button. If you see the \"Manage\" button, then the BigQuery is already enabled, and you're all set. \n\n4. In the APIs & Services menu, click on the \"Credentials\" tab and then click on the \"Create credentials\" button.\n\n5. In the \"Create credentials\" dialog, select \"Service account key\" as the type of credentials to create, and give it a name. Also grant the service account permissions by giving it a role such as \"BigQuery User\", which will allow you to run queries.\n\n6. After selecting the service account, select the \"JSON\" key type and then click on the \"Create\" button. This will download the JSON key file containing your credentials to your computer. It will look something like this:\n\n```json\n{\n \"type\": \"service_account\",\n \"project_id\": \"your project\",\n \"private_key_id\": \"your private key id\",\n \"private_key\": \"private key\",\n \"client_email\": \"email\",\n \"client_id\": \"client id\",\n \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n \"token_uri\": \"https://accounts.google.com/o/oauth2/token\",\n \"auth_provider_x509_cert_url\": \"https://www.googleapis.com/oauth2/v1/certs\",\n \"client_x509_cert_url\": \"https://www.googleapis.com/robot/v1/metadata/x509/email_id\"\n}\n```\n\n## Using the BigQuery Client\n\nOnce you have the credentials, you will need to use the BigQuery Python client to authenticate using your credentials. 
To do this, you will need to install the BigQuery Python client by running the following command in the terminal:\n\n```bash\npip install google-cloud-bigquery[pandas]\n```\n\nYou'll notice that we've installed the pandas add-on, which will be helpful for processing the BigQuery dataset as a pandas dataframe. Once the client is installed, you can authenticate using your credentials by running the following code:\n\n```py\nfrom google.cloud import bigquery\n\nclient = bigquery.Client.from_service_account_json(\"path/to/key.json\")\n```\n\nWith your credentials authenticated, you can now use the BigQuery Python client to interact with your BigQuery datasets. \n\nHere is an example of a function which queries the `covid19_nyt.us_counties` dataset in BigQuery to show the top 20 counties with the most confirmed cases as of the current day:\n\n```py\nimport numpy as np\n\nQUERY = (\n 'SELECT * FROM `bigquery-public-data.covid19_nyt.us_counties` ' \n 'ORDER BY date DESC,confirmed_cases DESC '\n 'LIMIT 20')\n\ndef run_query():\n query_job = client.query(QUERY) \n query_result = query_job.result() \n df = query_result.to_dataframe()\n # Select a subset of columns \n df = df[[\"confirmed_cases\", \"deaths\", \"county\", \"state_name\"]]\n # Convert numeric columns to standard numpy types\n df = df.astype({\"deaths\": np.int64, \"confirmed_cases\": np.int64})\n return df\n```\n\n## Building the Real-Time Dashboard\n\nOnce you have a function to query the data, you can use the `gr.DataFrame` component from the Gradio library to display the results in a tabular format. This is a useful way to inspect the data and make sure that it has been queried correctly.\n\nHere is an example of how to use the `gr.DataFrame` component to display the results. By passing in the `run_query` function to `gr.DataFrame`, we instruct Gradio to run the function as soon as the page loads and show the results. In addition, you also pass in the keyword `every` to tell the dashboard to refresh every hour (60*60 seconds).\n\n```py\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n gr.DataFrame(run_query, every=60*60)\n\ndemo.queue().launch() # Run the demo using queuing\n```\n\nPerhaps you'd like to add a visualization to our dashboard. You can use the `gr.ScatterPlot()` component to visualize the data in a scatter plot. This allows you to see the relationship between different variables such as case count and case deaths in the dataset and can be useful for exploring the data and gaining insights. Again, we can do this in real-time\nby passing in the `every` parameter. \n\nHere is a complete example showing how to use the `gr.ScatterPlot` to visualize in addition to displaying data with the `gr.DataFrame`\n\n```py\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n gr.Markdown(\"# \ud83d\udc89 Covid Dashboard (Updated Hourly)\")\n with gr.Row():\n gr.DataFrame(run_query, every=60*60)\n gr.ScatterPlot(run_query, every=60*60, x=\"confirmed_cases\", \n y=\"deaths\", tooltip=\"county\", width=500, height=500)\n\ndemo.queue().launch() # Run the demo with queuing enabled\n```", "html": "

Creating a Real-Time Dashboard from BigQuery Data

\n\n

Google BigQuery is a cloud-based service for processing very large data sets. It is a serverless and highly scalable data warehousing solution that enables users to analyze data using SQL-like queries.

\n\n

In this tutorial, we will show you how to query a BigQuery dataset in Python and display the data in a dashboard that updates in real time using gradio. The dashboard will look like this:

\n\n

\n\n

We'll cover the following steps in this Guide:

\n\n
    \n
  1. Setting up your BigQuery credentials
  2. Using the BigQuery client
  3. Building the real-time dashboard (in just 7 lines of Python)
\n\n

We'll be working with the New York Times' COVID dataset that is available as a public dataset on BigQuery. The dataset, named covid19_nyt.us_counties, contains the latest information about the number of confirmed cases and deaths from COVID across US counties.

\n\n

Prerequisites: This Guide uses Gradio Blocks, so make sure you are familiar with the Blocks class.

\n\n

Setting up your BigQuery Credentials

\n\n

To use Gradio with BigQuery, you will need to obtain your BigQuery credentials and use them with the BigQuery Python client. If you already have BigQuery credentials (as a .json file), you can skip this section. If not, you can do this for free in just a couple of minutes.

\n\n
    \n
  1. First, log in to your Google Cloud account and go to the Google Cloud Console (https://console.cloud.google.com/)

  2. In the Cloud Console, click on the hamburger menu in the top-left corner and select \"APIs & Services\" from the menu. If you do not have an existing project, you will need to create one.

  3. Then, click the \"+ Enabled APIs & services\" button, which allows you to enable specific services for your project. Search for \"BigQuery API\", click on it, and click the \"Enable\" button. If you see the \"Manage\" button, then the BigQuery API is already enabled, and you're all set.

  4. In the APIs & Services menu, click on the \"Credentials\" tab and then click on the \"Create credentials\" button.

  5. In the \"Create credentials\" dialog, select \"Service account key\" as the type of credentials to create, and give it a name. Also grant the service account permissions by giving it a role such as \"BigQuery User\", which will allow you to run queries.

  6. After selecting the service account, select the \"JSON\" key type and then click on the \"Create\" button. This will download the JSON key file containing your credentials to your computer. It will look something like this:
\n\n
{\n \"type\": \"service_account\",\n \"project_id\": \"your project\",\n \"private_key_id\": \"your private key id\",\n \"private_key\": \"private key\",\n \"client_email\": \"email\",\n \"client_id\": \"client id\",\n \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n \"token_uri\": \"https://accounts.google.com/o/oauth2/token\",\n \"auth_provider_x509_cert_url\": \"https://www.googleapis.com/oauth2/v1/certs\",\n \"client_x509_cert_url\":  \"https://www.googleapis.com/robot/v1/metadata/x509/email_id\"\n}\n
\n\n

Using the BigQuery Client

\n\n

Once you have the credentials, you will need to use the BigQuery Python client to authenticate using your credentials. To do this, you will need to install the BigQuery Python client by running the following command in the terminal:

\n\n
pip install google-cloud-bigquery[pandas]\n
\n\n

You'll notice that we've installed the pandas add-on, which will be helpful for processing the BigQuery dataset as a pandas dataframe. Once the client is installed, you can authenticate using your credentials by running the following code:

\n\n
from google.cloud import bigquery\n\nclient = bigquery.Client.from_service_account_json(\"path/to/key.json\")\n
\n\n

With your credentials authenticated, you can now use the BigQuery Python client to interact with your BigQuery datasets.

\n\n

Here is an example of a function which queries the covid19_nyt.us_counties dataset in BigQuery to show the top 20 counties with the most confirmed cases as of the current day:

\n\n
import numpy as np\n\nQUERY = (\n    'SELECT * FROM `bigquery-public-data.covid19_nyt.us_counties` ' \n    'ORDER BY date DESC,confirmed_cases DESC '\n    'LIMIT 20')\n\ndef run_query():\n    query_job = client.query(QUERY)  \n    query_result = query_job.result()  \n    df = query_result.to_dataframe()\n    # Select a subset of columns \n    df = df[[\"confirmed_cases\", \"deaths\", \"county\", \"state_name\"]]\n    # Convert numeric columns to standard numpy types\n    df = df.astype({\"deaths\": np.int64, \"confirmed_cases\": np.int64})\n    return df\n
\n\n

Building the Real-Time Dashboard

\n\n

Once you have a function to query the data, you can use the gr.DataFrame component from the Gradio library to display the results in a tabular format. This is a useful way to inspect the data and make sure that it has been queried correctly.

\n\n

Here is an example of how to use the gr.DataFrame component to display the results. By passing in the run_query function to gr.DataFrame, we instruct Gradio to run the function as soon as the page loads and show the results. In addition, you also pass in the keyword every to tell the dashboard to refresh every hour (60*60 seconds).

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    gr.DataFrame(run_query, every=60*60)\n\ndemo.queue().launch()  # Run the demo using queuing\n
\n\n

Perhaps you'd like to add a visualization to our dashboard. You can use the gr.ScatterPlot() component to visualize the data in a scatter plot. This allows you to see the relationship between different variables such as case count and case deaths in the dataset and can be useful for exploring the data and gaining insights. Again, we can do this in real-time\nby passing in the every parameter.

\n\n

Here is a complete example showing how to use gr.ScatterPlot to visualize the data in addition to displaying it with gr.DataFrame:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    gr.Markdown(\"# \ud83d\udc89 Covid Dashboard (Updated Hourly)\")\n    with gr.Row():\n        gr.DataFrame(run_query, every=60*60)\n        gr.ScatterPlot(run_query, every=60*60, x=\"confirmed_cases\", \n                        y=\"deaths\", tooltip=\"county\", width=500, height=500)\n\ndemo.queue().launch()  # Run the demo with queuing enabled\n
\n", "tags": ["TABULAR", "DASHBOARD", "PLOTS "], "spaces": [], "url": "/guides/creating-a-dashboard-from-bigquery-data/", "contributor": null}], "preprocessing": "this component does *not* accept input.", "postprocessing": "expects a pandas dataframe with the data to plot.", "parent": "gradio", "prev_obj": "Radio", "next_obj": "Slider"}, "slider": {"class": null, "name": "Slider", "description": "Creates a slider that ranges from `minimum` to `maximum` with a step size of `step`.
", "tags": {"preprocessing": "passes slider value as a {float} into the function.", "postprocessing": "expects an {int} or {float} returned from function and sets slider value to it as long as it is within range.", "examples-format": "A {float} or {int} representing the slider's value.", "demos": "sentence_builder, slider_release, generate_tone, titanic_survival, interface_random_slider, blocks_random_slider", "guides": "create-your-own-friends-with-a-gan"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "minimum", "annotation": "float", "doc": "minimum value for slider.", "default": "0"}, {"name": "maximum", "annotation": "float", "doc": "maximum value for slider.", "default": "100"}, {"name": "value", "annotation": "float | Callable | None", "doc": "default value. If callable, the function will be called whenever the app loads to set the initial value of the component. Ignored if randomized=True.", "default": "None"}, {"name": "step", "annotation": "float | None", "doc": "increment between slider values.", "default": "None"}, {"name": "label", "annotation": "str | None", "doc": "component name in interface.", "default": "None"}, {"name": "info", "annotation": "str | None", "doc": "additional component description.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", "default": "None"}, {"name": "show_label", "annotation": "bool | None", "doc": "if True, will display label.", "default": "None"}, {"name": "container", "annotation": "bool", "doc": "If True, will place the component in a container - providing some extra padding around the border.", "default": "True"}, {"name": "scale", "annotation": "int | None", "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", "default": "None"}, {"name": "min_width", "annotation": "int", "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", "default": "160"}, {"name": "interactive", "annotation": "bool | None", "doc": "if True, slider will be adjustable; if False, adjusting will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.", "default": "None"}, {"name": "visible", "annotation": "bool", "doc": "If False, component will be hidden.", "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "elem_classes", "annotation": "list[str] | str | None", "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. 
Can be used for targeting CSS styles.", "default": "None"}, {"name": "randomize", "annotation": "bool", "doc": "If True, the value of the slider when the app loads is taken uniformly at random from the range given by the minimum and maximum.", "default": "False"}], "returns": {"annotation": null}, "example": null, "fns": [{"fn": null, "name": "change", "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Slider"}, {"fn": null, "name": "input", "description": "This listener is triggered when the user changes the value of the component. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Slider"}, {"fn": null, "name": "release", "description": "This listener is triggered when the user releases the mouse on this component (e.g. when the user releases the slider). This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Slider"}], "string_shortcuts": [["Slider", "slider", "Uses default values"]], "demos": [["sentence_builder", "import gradio as gr\n\n\ndef sentence_builder(quantity, animal, countries, place, activity_list, morning):\n return f\"\"\"The {quantity} {animal}s from {\" and \".join(countries)} went to the {place} where they {\" and \".join(activity_list)} until the {\"morning\" if morning else \"night\"}\"\"\"\n\n\ndemo = gr.Interface(\n sentence_builder,\n [\n gr.Slider(2, 20, value=4, label=\"Count\", info=\"Choose between 2 and 20\"),\n gr.Dropdown(\n [\"cat\", \"dog\", \"bird\"], label=\"Animal\", info=\"Will add more animals later!\"\n ),\n gr.CheckboxGroup([\"USA\", \"Japan\", \"Pakistan\"], label=\"Countries\", info=\"Where are they from?\"),\n gr.Radio([\"park\", \"zoo\", \"road\"], label=\"Location\", info=\"Where did they go?\"),\n gr.Dropdown(\n [\"ran\", \"swam\", \"ate\", \"slept\"], value=[\"swam\", \"slept\"], multiselect=True, label=\"Activity\", info=\"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed auctor, nisl eget ultricies aliquam, nunc nisl aliquet nunc, eget aliquam nisl nunc vel nisl.\"\n ),\n gr.Checkbox(label=\"Morning\", info=\"Did they do it in the morning?\"),\n ],\n \"text\",\n examples=[\n [2, \"cat\", [\"Japan\", \"Pakistan\"], \"park\", [\"ate\", \"swam\"], True],\n [4, \"dog\", [\"Japan\"], \"zoo\", [\"ate\", \"swam\"], False],\n [10, \"bird\", [\"USA\", \"Pakistan\"], \"road\", [\"ran\"], False],\n [8, \"cat\", [\"Pakistan\"], \"zoo\", [\"ate\"], True],\n ]\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n"], ["slider_release", "import gradio as gr\n\n\ndef identity(x, state):\n state += 1\n return x, state, state\n\n\nwith gr.Blocks() as demo:\n slider = gr.Slider(0, 100, step=0.1)\n state = gr.State(value=0)\n with gr.Row():\n number = gr.Number(label=\"On release\")\n number2 = gr.Number(label=\"Number of events fired\")\n slider.release(identity, inputs=[slider, state], outputs=[number, state, number2], api_name=\"predict\")\n\nif __name__ == \"__main__\":\n print(\"here\")\n demo.launch()\n print(demo.server_port)\n"], ["generate_tone", "import numpy as np\nimport gradio as gr\n\nnotes = [\"C\", \"C#\", \"D\", \"D#\", \"E\", \"F\", \"F#\", \"G\", \"G#\", \"A\", \"A#\", \"B\"]\n\ndef generate_tone(note, octave, duration):\n sr = 48000\n a4_freq, tones_from_a4 = 440, 12 * (octave - 4) + (note - 9)\n frequency = a4_freq * 2 ** (tones_from_a4 / 12)\n duration = int(duration)\n audio = np.linspace(0, duration, duration * sr)\n audio = (20000 * np.sin(audio * (2 * np.pi * frequency))).astype(np.int16)\n return sr, audio\n\ndemo = gr.Interface(\n generate_tone,\n [\n gr.Dropdown(notes, type=\"index\"),\n gr.Slider(4, 6, step=1),\n gr.Textbox(value=1, label=\"Duration in seconds\"),\n ],\n \"audio\",\n)\nif __name__ == \"__main__\":\n demo.launch()\n"], ["titanic_survival", "import os\n\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\n\nimport gradio as gr\n\ncurrent_dir = os.path.dirname(os.path.realpath(__file__))\ndata = pd.read_csv(os.path.join(current_dir, \"files/titanic.csv\"))\n\n\ndef encode_age(df):\n df.Age = df.Age.fillna(-0.5)\n bins = (-1, 0, 5, 12, 18, 25, 35, 60, 120)\n categories = pd.cut(df.Age, bins, labels=False)\n df.Age = categories\n return df\n\n\ndef encode_fare(df):\n df.Fare = df.Fare.fillna(-0.5)\n bins = (-1, 0, 8, 
15, 31, 1000)\n categories = pd.cut(df.Fare, bins, labels=False)\n df.Fare = categories\n return df\n\n\ndef encode_df(df):\n df = encode_age(df)\n df = encode_fare(df)\n sex_mapping = {\"male\": 0, \"female\": 1}\n df = df.replace({\"Sex\": sex_mapping})\n embark_mapping = {\"S\": 1, \"C\": 2, \"Q\": 3}\n df = df.replace({\"Embarked\": embark_mapping})\n df.Embarked = df.Embarked.fillna(0)\n df[\"Company\"] = 0\n df.loc[(df[\"SibSp\"] > 0), \"Company\"] = 1\n df.loc[(df[\"Parch\"] > 0), \"Company\"] = 2\n df.loc[(df[\"SibSp\"] > 0) & (df[\"Parch\"] > 0), \"Company\"] = 3\n df = df[\n [\n \"PassengerId\",\n \"Pclass\",\n \"Sex\",\n \"Age\",\n \"Fare\",\n \"Embarked\",\n \"Company\",\n \"Survived\",\n ]\n ]\n return df\n\n\ntrain = encode_df(data)\n\nX_all = train.drop([\"Survived\", \"PassengerId\"], axis=1)\ny_all = train[\"Survived\"]\n\nnum_test = 0.20\nX_train, X_test, y_train, y_test = train_test_split(\n X_all, y_all, test_size=num_test, random_state=23\n)\n\nclf = RandomForestClassifier()\nclf.fit(X_train, y_train)\npredictions = clf.predict(X_test)\n\n\ndef predict_survival(passenger_class, is_male, age, company, fare, embark_point):\n if passenger_class is None or embark_point is None:\n return None\n df = pd.DataFrame.from_dict(\n {\n \"Pclass\": [passenger_class + 1],\n \"Sex\": [0 if is_male else 1],\n \"Age\": [age],\n \"Fare\": [fare],\n \"Embarked\": [embark_point + 1],\n \"Company\": [\n (1 if \"Sibling\" in company else 0) + (2 if \"Child\" in company else 0)\n ]\n }\n )\n df = encode_age(df)\n df = encode_fare(df)\n pred = clf.predict_proba(df)[0]\n return {\"Perishes\": float(pred[0]), \"Survives\": float(pred[1])}\n\n\ndemo = gr.Interface(\n predict_survival,\n [\n gr.Dropdown([\"first\", \"second\", \"third\"], type=\"index\"),\n \"checkbox\",\n gr.Slider(0, 80, value=25),\n gr.CheckboxGroup([\"Sibling\", \"Child\"], label=\"Travelling with (select all)\"),\n gr.Number(value=20),\n gr.Radio([\"S\", \"C\", \"Q\"], type=\"index\"),\n ],\n \"label\",\n examples=[\n [\"first\", True, 30, [], 50, \"S\"],\n [\"second\", False, 40, [\"Sibling\", \"Child\"], 10, \"Q\"],\n [\"third\", True, 30, [\"Child\"], 20, \"S\"],\n ],\n interpretation=\"default\",\n live=True,\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n"], ["interface_random_slider", "import gradio as gr\n\n\ndef func(slider_1, slider_2, *args):\n return slider_1 + slider_2 * 5\n\n\ndemo = gr.Interface(\n func,\n [\n gr.Slider(minimum=1.5, maximum=250000.89, randomize=True, label=\"Random Big Range\"),\n gr.Slider(minimum=-1, maximum=1, randomize=True, step=0.05, label=\"Random only multiple of 0.05 allowed\"),\n gr.Slider(minimum=0, maximum=1, randomize=True, step=0.25, label=\"Random only multiples of 0.25 allowed\"),\n gr.Slider(minimum=-100, maximum=100, randomize=True, step=3, label=\"Random between -100 and 100 step 3\"),\n gr.Slider(minimum=-100, maximum=100, randomize=True, label=\"Random between -100 and 100\"),\n gr.Slider(value=0.25, minimum=5, maximum=30, step=-1),\n ],\n \"number\",\n interpretation=\"default\"\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n"], ["blocks_random_slider", "\nimport gradio as gr\n\n\ndef func(slider_1, slider_2):\n return slider_1 * 5 + slider_2\n\n\nwith gr.Blocks() as demo:\n slider = gr.Slider(minimum=-10.2, maximum=15, label=\"Random Slider (Static)\", randomize=True)\n slider_1 = gr.Slider(minimum=100, maximum=200, label=\"Random Slider (Input 1)\", randomize=True)\n slider_2 = gr.Slider(minimum=10, maximum=23.2, label=\"Random Slider (Input 2)\", 
randomize=True)\n slider_3 = gr.Slider(value=3, label=\"Non random slider\")\n btn = gr.Button(\"Run\")\n btn.click(func, inputs=[slider_1, slider_2], outputs=gr.Number())\n\nif __name__ == \"__main__\":\n demo.launch()\n"]], "guides": [{"name": "create-your-own-friends-with-a-gan", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 34, "pretty_name": "Create Your Own Friends With A Gan", "content": "# Create Your Own Friends with a GAN\n\n\n\n\n\n\n\n## Introduction\n\nIt seems that cryptocurrencies, [NFTs](https://www.nytimes.com/interactive/2022/03/18/technology/nft-guide.html), and the web3 movement are all the rage these days! Digital assets are being listed on marketplaces for astounding amounts of money, and just about every celebrity is debuting their own NFT collection. While your crypto assets [may be taxable, such as in Canada](https://www.canada.ca/en/revenue-agency/programs/about-canada-revenue-agency-cra/compliance/digital-currency/cryptocurrency-guide.html), today we'll explore some fun and tax-free ways to generate your own assortment of procedurally generated [CryptoPunks](https://www.larvalabs.com/cryptopunks).\n\nGenerative Adversarial Networks, often known just as *GANs*, are a specific class of deep-learning models that are designed to learn from an input dataset to create (*generate!*) new material that is convincingly similar to elements of the original training set. Famously, the website [thispersondoesnotexist.com](https://thispersondoesnotexist.com/) went viral with lifelike, yet synthetic, images of people generated with a model called StyleGAN2. GANs have gained traction in the machine learning world, and are now being used to generate all sorts of images, text, and even [music](https://salu133445.github.io/musegan/)!\n\nToday we'll briefly look at the high-level intuition behind GANs, and then we'll build a small demo around a pre-trained GAN to see what all the fuss is about. Here's a peek at what we're going to be putting together:\n\n\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). To use the pretrained model, also install `torch` and `torchvision`.\n\n## GANs: a very brief introduction\n\nOriginally proposed in [Goodfellow et al. 2014](https://arxiv.org/abs/1406.2661), GANs are made up of neural networks which compete with the intention of outsmarting each other. One network, known as the *generator*, is responsible for generating images. The other network, the *discriminator*, receives an image at a time from the generator along with a **real** image from the training data set. The discriminator then has to guess: which image is the fake?\n\nThe generator is constantly training to create images which are trickier for the discriminator to identify, while the discriminator raises the bar for the generator every time it correctly detects a fake. As the networks engage in this competitive (*adversarial!*) relationship, the images that get generated improve to the point where they become indistinguishable to human eyes!\n\nFor a more in-depth look at GANs, you can take a look at [this excellent post on Analytics Vidhya](https://www.analyticsvidhya.com/blog/2021/06/a-detailed-explanation-of-gan-with-implementation-using-tensorflow-and-keras/) or this [PyTorch tutorial](https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html). 
For now, though, we'll dive into a demo!\n\n## Step 1 \u2014 Create the Generator model\n\nTo generate new images with a GAN, you only need the generator model. There are many different architectures that the generator could use, but for this demo we'll use a pretrained GAN generator model with the following architecture:\n\n```python\nfrom torch import nn\n\nclass Generator(nn.Module):\n # Refer to the link below for explanations about nc, nz, and ngf\n # https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html#inputs\n def __init__(self, nc=4, nz=100, ngf=64):\n super(Generator, self).__init__()\n self.network = nn.Sequential(\n nn.ConvTranspose2d(nz, ngf * 4, 3, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 0, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n nn.Tanh(),\n )\n\n def forward(self, input):\n output = self.network(input)\n return output\n```\n\nWe're taking the generator from [this repo by @teddykoker](https://github.com/teddykoker/cryptopunks-gan/blob/main/train.py#L90), where you can also see the original discriminator model structure.\n\nAfter instantiating the model, we'll load in the weights from the Hugging Face Hub, stored at [nateraw/cryptopunks-gan](https://huggingface.co/nateraw/cryptopunks-gan):\n\n```python\nfrom huggingface_hub import hf_hub_download\nimport torch\n\nmodel = Generator()\nweights_path = hf_hub_download('nateraw/cryptopunks-gan', 'generator.pth')\nmodel.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))) # Use 'cuda' if you have a GPU available\n```\n\n## Step 2 \u2014 Defining a `predict` function\n\nThe `predict` function is the key to making Gradio work! Whatever inputs we choose through the Gradio interface will get passed through our `predict` function, which should operate on the inputs and generate outputs that we can display with Gradio output components. For GANs it's common to pass random noise into our model as the input, so we'll generate a tensor of random numbers and pass that through the model. We can then use `torchvision`'s `save_image` function to save the output of the model as a `png` file, and return the file name:\n\n```python\nfrom torchvision.utils import save_image\n\ndef predict(seed):\n num_punks = 4\n torch.manual_seed(seed)\n z = torch.randn(num_punks, 100, 1, 1)\n punks = model(z)\n save_image(punks, \"punks.png\", normalize=True)\n return 'punks.png'\n```\n\nWe're giving our `predict` function a `seed` parameter, so that we can fix the random tensor generation with a seed. We'll then be able to reproduce punks if we want to see them again by passing in the same seed.\n\n*Note!* Our model needs an input tensor of dimensions 100x1x1 to do a single inference, or (BatchSize)x100x1x1 for generating a batch of images. In this demo we'll start by generating 4 punks at a time.\n\n## Step 3 \u2014 Creating a Gradio interface\n\nAt this point you can even run the code you have with `predict()`, and you'll find your freshly generated punks in your file system at `./punks.png`. To make a truly interactive demo, though, we'll build out a simple interface with Gradio. 
Our goals here are to:\n\n* Set a slider input so users can choose the \"seed\" value\n* Use an image component for our output to showcase the generated punks\n* Use our `predict()` to take the seed and generate the images\n\nWith `gr.Interface()`, we can define all of that with a single function call:\n\n```python\nimport gradio as gr\n\ngr.Interface(\n predict,\n inputs=[\n gr.Slider(0, 1000, label='Seed', default=42),\n ],\n outputs=\"image\",\n).launch()\n```\n\nLaunching the interface should present you with something like this:\n\n\n\n## Step 4 \u2014 Even more punks!\n\nGenerating 4 punks at a time is a good start, but maybe we'd like to control how many we want to make each time. Adding more inputs to our Gradio interface is as simple as adding another item to the `inputs` list that we pass to `gr.Interface`:\n\n```python\ngr.Interface(\n predict,\n inputs=[\n gr.Slider(0, 1000, label='Seed', default=42),\n gr.Slider(4, 64, label='Number of Punks', step=1, default=10), # Adding another slider!\n ],\n outputs=\"image\",\n).launch()\n```\n\nThe new input will be passed to our `predict()` function, so we have to make some changes to that function to accept a new parameter:\n\n```python\ndef predict(seed, num_punks):\n torch.manual_seed(seed)\n z = torch.randn(num_punks, 100, 1, 1)\n punks = model(z)\n save_image(punks, \"punks.png\", normalize=True)\n return 'punks.png'\n```\n\nWhen you relaunch your interface, you should see a second slider that'll let you control the number of punks!\n\n## Step 5 - Polishing it up\n\nYour Gradio app is pretty much good to go, but you can add a few extra things to really make it ready for the spotlight \u2728\n\nWe can add some examples that users can easily try out by adding this to the `gr.Interface`:\n\n```python\ngr.Interface(\n # ...\n # keep everything as it is, and then add\n examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],\n).launch(cache_examples=True) # cache_examples is optional\n```\n\nThe `examples` parameter takes a list of lists, where each item in the sublists is ordered in the same order that we've listed the `inputs`. So in our case, `[seed, num_punks]`. Give it a try!\n\nYou can also try adding a `title`, `description`, and `article` to the `gr.Interface`. 
Each of those parameters accepts a string, so try it out and see what happens \ud83d\udc40 `article` will also accept HTML, as [explored in a previous guide](/guides/key-features/#descriptive-content)!\n\nWhen you're all done, you may end up with something like this:\n\n\n\nFor reference, here is our full code:\n\n```python\nimport torch\nfrom torch import nn\nfrom huggingface_hub import hf_hub_download\nfrom torchvision.utils import save_image\nimport gradio as gr\n\nclass Generator(nn.Module):\n # Refer to the link below for explanations about nc, nz, and ngf\n # https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html#inputs\n def __init__(self, nc=4, nz=100, ngf=64):\n super(Generator, self).__init__()\n self.network = nn.Sequential(\n nn.ConvTranspose2d(nz, ngf * 4, 3, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 0, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n nn.Tanh(),\n )\n\n def forward(self, input):\n output = self.network(input)\n return output\n\nmodel = Generator()\nweights_path = hf_hub_download('nateraw/cryptopunks-gan', 'generator.pth')\nmodel.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))) # Use 'cuda' if you have a GPU available\n\ndef predict(seed, num_punks):\n torch.manual_seed(seed)\n z = torch.randn(num_punks, 100, 1, 1)\n punks = model(z)\n save_image(punks, \"punks.png\", normalize=True)\n return 'punks.png'\n\ngr.Interface(\n predict,\n inputs=[\n gr.Slider(0, 1000, label='Seed', default=42),\n gr.Slider(4, 64, label='Number of Punks', step=1, default=10),\n ],\n outputs=\"image\",\n examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],\n).launch(cache_examples=True)\n```\n----------\n\nCongratulations! You've built out your very own GAN-powered CryptoPunks generator, with a fancy Gradio interface that makes it easy for anyone to use. Now you can [scour the Hub for more GANs](https://huggingface.co/models?other=gan) (or train your own) and continue making even more awesome demos \ud83e\udd17", "html": "

Create Your Own Friends with a GAN

\n\n

Introduction

\n\n

It seems that cryptocurrencies, NFTs, and the web3 movement are all the rage these days! Digital assets are being listed on marketplaces for astounding amounts of money, and just about every celebrity is debuting their own NFT collection. While your crypto assets may be taxable, such as in Canada, today we'll explore some fun and tax-free ways to generate your own assortment of procedurally generated CryptoPunks.

\n\n

Generative Adversarial Networks, often known just as GANs, are a specific class of deep-learning models that are designed to learn from an input dataset to create (generate!) new material that is convincingly similar to elements of the original training set. Famously, the website thispersondoesnotexist.com went viral with lifelike, yet synthetic, images of people generated with a model called StyleGAN2. GANs have gained traction in the machine learning world, and are now being used to generate all sorts of images, text, and even music!

\n\n

Today we'll briefly look at the high-level intuition behind GANs, and then we'll build a small demo around a pre-trained GAN to see what all the fuss is about. Here's a peek at what we're going to be putting together:

\n\n\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed. To use the pretrained model, also install torch and torchvision.

\n\n

GANs: a very brief introduction

\n\n

Originally proposed in Goodfellow et al. 2014, GANs are made up of neural networks which compete with the intention of outsmarting each other. One network, known as the generator, is responsible for generating images. The other network, the discriminator, receives an image at a time from the generator along with a real image from the training data set. The discriminator then has to guess: which image is the fake?

\n\n

The generator is constantly training to create images which are trickier for the discriminator to identify, while the discriminator raises the bar for the generator every time it correctly detects a fake. As the networks engage in this competitive (adversarial!) relationship, the images that get generated improve to the point where they become indistinguishable to human eyes!

\n\n

For a more in-depth look at GANs, you can take a look at this excellent post on Analytics Vidhya or this PyTorch tutorial. For now, though, we'll dive into a demo!

\n\n

Step 1 \u2014 Create the Generator model

\n\n

To generate new images with a GAN, you only need the generator model. There are many different architectures that the generator could use, but for this demo we'll use a pretrained GAN generator model with the following architecture:

\n\n
from torch import nn\n\nclass Generator(nn.Module):\n    # Refer to the link below for explanations about nc, nz, and ngf\n    # https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html#inputs\n    def __init__(self, nc=4, nz=100, ngf=64):\n        super(Generator, self).__init__()\n        self.network = nn.Sequential(\n            nn.ConvTranspose2d(nz, ngf * 4, 3, 1, 0, bias=False),\n            nn.BatchNorm2d(ngf * 4),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False),\n            nn.BatchNorm2d(ngf * 2),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 0, bias=False),\n            nn.BatchNorm2d(ngf),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n            nn.Tanh(),\n        )\n\n    def forward(self, input):\n        output = self.network(input)\n        return output\n
\n\n

We're taking the generator from this repo by @teddykoker, where you can also see the original discriminator model structure.

\n\n

After instantiating the model, we'll load in the weights from the Hugging Face Hub, stored at nateraw/cryptopunks-gan:

\n\n
from huggingface_hub import hf_hub_download\nimport torch\n\nmodel = Generator()\nweights_path = hf_hub_download('nateraw/cryptopunks-gan', 'generator.pth')\nmodel.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))) # Use 'cuda' if you have a GPU available\n
\n\n

Step 2 \u2014 Defining a predict function

\n\n

The predict function is the key to making Gradio work! Whatever inputs we choose through the Gradio interface will get passed through our predict function, which should operate on the inputs and generate outputs that we can display with Gradio output components. For GANs it's common to pass random noise into our model as the input, so we'll generate a tensor of random numbers and pass that through the model. We can then use torchvision's save_image function to save the output of the model as a png file, and return the file name:

\n\n
from torchvision.utils import save_image\n\ndef predict(seed):\n    num_punks = 4\n    torch.manual_seed(seed)\n    z = torch.randn(num_punks, 100, 1, 1)\n    punks = model(z)\n    save_image(punks, \"punks.png\", normalize=True)\n    return 'punks.png'\n
\n\n

We're giving our predict function a seed parameter, so that we can fix the random tensor generation with a seed. We'll then be able to reproduce punks if we want to see them again by passing in the same seed.

\n\n

Note! Our model needs an input tensor of dimensions 100x1x1 to do a single inference, or (BatchSize)x100x1x1 for generating a batch of images. In this demo we'll start by generating 4 punks at a time.

\n\n

Step 3 \u2014 Creating a Gradio interface

\n\n

At this point you can even run the code you have with predict(<SOME_NUMBER>), and you'll find your freshly generated punks in your file system at ./punks.png. To make a truly interactive demo, though, we'll build out a simple interface with Gradio. Our goals here are to:

\n\n
    \n
  • Set a slider input so users can choose the \"seed\" value
  • Use an image component for our output to showcase the generated punks
  • Use our predict() to take the seed and generate the images
\n\n

With gr.Interface(), we can define all of that with a single function call:

\n\n
import gradio as gr\n\ngr.Interface(\n    predict,\n    inputs=[\n        gr.Slider(0, 1000, label='Seed', default=42),\n    ],\n    outputs=\"image\",\n).launch()\n
\n\n

Launching the interface should present you with something like this:

\n\n\n\n

Step 4 \u2014 Even more punks!

\n\n

Generating 4 punks at a time is a good start, but maybe we'd like to control how many we want to make each time. Adding more inputs to our Gradio interface is as simple as adding another item to the inputs list that we pass to gr.Interface:

\n\n
gr.Interface(\n    predict,\n    inputs=[\n        gr.Slider(0, 1000, label='Seed', default=42),\n        gr.Slider(4, 64, label='Number of Punks', step=1, default=10), # Adding another slider!\n    ],\n    outputs=\"image\",\n).launch()\n
\n\n

The new input will be passed to our predict() function, so we have to make some changes to that function to accept a new parameter:

\n\n
def predict(seed, num_punks):\n    torch.manual_seed(seed)\n    z = torch.randn(num_punks, 100, 1, 1)\n    punks = model(z)\n    save_image(punks, \"punks.png\", normalize=True)\n    return 'punks.png'\n
\n\n

When you relaunch your interface, you should see a second slider that'll let you control the number of punks!

\n\n

Step 5 - Polishing it up

\n\n

Your Gradio app is pretty much good to go, but you can add a few extra things to really make it ready for the spotlight \u2728

\n\n

We can add some examples that users can easily try out by adding this to the gr.Interface:

\n\n
gr.Interface(\n    # ...\n    # keep everything as it is, and then add\n    examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],\n).launch(cache_examples=True) # cache_examples is optional\n
\n\n

The examples parameter takes a list of lists, where each item in the sublists is ordered in the same order that we've listed the inputs. So in our case, [seed, num_punks]. Give it a try!

\n\n

You can also try adding a title, description, and article to the gr.Interface. Each of those parameters accepts a string, so try it out and see what happens \ud83d\udc40 article will also accept HTML, as explored in a previous guide!

\n\n

When you're all done, you may end up with something like this:

\n\n\n\n

For reference, here is our full code:

\n\n
import torch\nfrom torch import nn\nfrom huggingface_hub import hf_hub_download\nfrom torchvision.utils import save_image\nimport gradio as gr\n\nclass Generator(nn.Module):\n    # Refer to the link below for explanations about nc, nz, and ngf\n    # https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html#inputs\n    def __init__(self, nc=4, nz=100, ngf=64):\n        super(Generator, self).__init__()\n        self.network = nn.Sequential(\n            nn.ConvTranspose2d(nz, ngf * 4, 3, 1, 0, bias=False),\n            nn.BatchNorm2d(ngf * 4),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False),\n            nn.BatchNorm2d(ngf * 2),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 0, bias=False),\n            nn.BatchNorm2d(ngf),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n            nn.Tanh(),\n        )\n\n    def forward(self, input):\n        output = self.network(input)\n        return output\n\nmodel = Generator()\nweights_path = hf_hub_download('nateraw/cryptopunks-gan', 'generator.pth')\nmodel.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))) # Use 'cuda' if you have a GPU available\n\ndef predict(seed, num_punks):\n    torch.manual_seed(seed)\n    z = torch.randn(num_punks, 100, 1, 1)\n    punks = model(z)\n    save_image(punks, \"punks.png\", normalize=True)\n    return 'punks.png'\n\ngr.Interface(\n    predict,\n    inputs=[\n        gr.Slider(0, 1000, label='Seed', default=42),\n        gr.Slider(4, 64, label='Number of Punks', step=1, default=10),\n    ],\n    outputs=\"image\",\n    examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],\n).launch(cache_examples=True)\n
\n\n
\n\n

Congratulations! You've built out your very own GAN-powered CryptoPunks generator, with a fancy Gradio interface that makes it easy for anyone to use. Now you can scour the Hub for more GANs (or train your own) and continue making even more awesome demos \ud83e\udd17

\n", "tags": ["GAN", "IMAGE", "HUB"], "spaces": ["https://huggingface.co/spaces/NimaBoscarino/cryptopunks", "https://huggingface.co/spaces/nateraw/cryptopunks-generator"], "url": "/guides/create-your-own-friends-with-a-gan/", "contributor": "Nima Boscarino and Nate Raw"}], "preprocessing": "passes slider value as a float into the function.", "postprocessing": "expects an int or float returned from function and sets slider value to it as long as it is within range.", "examples-format": "A float or int representing the slider's value.", "parent": "gradio", "prev_obj": "ScatterPlot", "next_obj": "State"}, "state": {"class": null, "name": "State", "description": "Special hidden component that stores session state across runs of the demo by the same user. The value of the State variable is cleared when the user refreshes the page.
", "tags": {"preprocessing": "No preprocessing is performed", "postprocessing": "No postprocessing is performed", "demos": "blocks_simple_squares", "guides": "real-time-speech-recognition"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "value", "annotation": "Any", "doc": "the initial value (of arbitrary type) of the state. The provided argument is deepcopied. If a callable is provided, the function will be called whenever the app loads to set the initial value of the state.", "default": "None"}], "returns": {"annotation": null}, "example": null, "fns": [], "demos": [["blocks_simple_squares", "import gradio as gr\n\ndemo = gr.Blocks(css=\"\"\"#btn {color: red} .abc {font-family: \"Comic Sans MS\", \"Comic Sans\", cursive !important}\"\"\")\n\nwith demo:\n default_json = {\"a\": \"a\"}\n\n num = gr.State(value=0)\n squared = gr.Number(value=0)\n btn = gr.Button(\"Next Square\", elem_id=\"btn\", elem_classes=[\"abc\", \"def\"])\n\n stats = gr.State(value=default_json)\n table = gr.JSON()\n\n def increase(var, stats_history):\n var += 1\n stats_history[str(var)] = var**2\n return var, var**2, stats_history, stats_history\n\n btn.click(increase, [num, stats], [num, squared, stats, table])\n\nif __name__ == \"__main__\":\n demo.launch()\n"]], "guides": [{"name": "real-time-speech-recognition", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 40, "pretty_name": "Real Time Speech Recognition", "content": "# Real Time Speech Recognition \n\n\n\n\n## Introduction\n\nAutomatic speech recognition (ASR), the conversion of spoken speech to text, is a very important and thriving area of machine learning. ASR algorithms run on practically every smartphone, and are becoming increasingly embedded in professional workflows, such as digital assistants for nurses and doctors. Because ASR algorithms are designed to be used directly by customers and end users, it is important to validate that they are behaving as expected when confronted with a wide variety of speech patterns (different accents, pitches, and background audio conditions).\n\nUsing `gradio`, you can easily build a demo of your ASR model and share that with a testing team, or test it yourself by speaking through the microphone on your device.\n\nThis tutorial will show how to take a pretrained speech-to-text model and deploy it with a Gradio interface. We will start with a ***full-context*** model, in which the user speaks the entire audio before the prediction runs. Then we will adapt the demo to make it ***streaming***, meaning that the audio model will convert speech as you speak. The streaming demo that we create will look something like this (try it below or [in a new tab](https://huggingface.co/spaces/abidlabs/streaming-asr-paused)!):\n\n\n\nReal-time ASR is inherently *stateful*, meaning that the model's predictions change depending on what words the user previously spoke. So, in this tutorial, we will also cover how to use **state** with Gradio demos. \n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). You will also need a pretrained speech recognition model. In this tutorial, we will build demos from 2 ASR libraries:\n\n* Transformers (for this, `pip install transformers` and `pip install torch`) \n* DeepSpeech (`pip install deepspeech==0.8.2`)\n\nMake sure you have at least one of these installed so that you can follow along the tutorial. 
You will also need `ffmpeg` [installed on your system](https://www.ffmpeg.org/download.html), if you do not already have it, to process files from the microphone.\n\nHere's how to build a real time speech recognition (ASR) app: \n\n1. [Set up the Transformers ASR Model](#1-set-up-the-transformers-asr-model)\n2. [Create a Full-Context ASR Demo with Transformers](#2-create-a-full-context-asr-demo-with-transformers) \n3. [Create a Streaming ASR Demo with Transformers](#3-create-a-streaming-asr-demo-with-transformers)\n4. [Create a Streaming ASR Demo with DeepSpeech](#4-create-a-streaming-asr-demo-with-deep-speech)\n\n\n## 1. Set up the Transformers ASR Model\n\nFirst, you will need to have an ASR model that you have either trained yourself or you will need to download a pretrained model. In this tutorial, we will start by using a pretrained ASR model from the Hugging Face model, `Wav2Vec2`. \n\nHere is the code to load `Wav2Vec2` from Hugging Face `transformers`.\n\n```python\nfrom transformers import pipeline\n\np = pipeline(\"automatic-speech-recognition\")\n```\n\nThat's it! By default, the automatic speech recognition model pipeline loads Facebook's `facebook/wav2vec2-base-960h` model.\n\n## 2. Create a Full-Context ASR Demo with Transformers \n\nWe will start by creating a *full-context* ASR demo, in which the user speaks the full audio before using the ASR model to run inference. This is very easy with Gradio -- we simply create a function around the `pipeline` object above.\n\nWe will use `gradio`'s built in `Audio` component, configured to take input from the user's microphone and return a filepath for the recorded audio. The output component will be a plain `Textbox`.\n\n```python\nimport gradio as gr\n\ndef transcribe(audio):\n text = p(audio)[\"text\"]\n return text\n\ngr.Interface(\n fn=transcribe, \n inputs=gr.Audio(source=\"microphone\", type=\"filepath\"), \n outputs=\"text\").launch()\n```\n\nSo what's happening here? The `transcribe` function takes a single parameter, `audio`, which is a filepath to the audio file that the user has recorded. The `pipeline` object expects a filepath and converts it to text, which is returned to the frontend and displayed in a textbox. \n\nLet's see it in action! (Record a short audio clip and then click submit, or [open in a new tab](https://huggingface.co/spaces/abidlabs/full-context-asr)):\n\n\n\n## 3. Create a Streaming ASR Demo with Transformers\n\nOk great! We've built an ASR model that works well for short audio clips. However, if you are recording longer audio clips, you probably want a *streaming* interface, one that transcribes audio as the user speaks instead of just all-at-once at the end.\n\nThe good news is that it's not too difficult to adapt the demo we just made to make it streaming, using the same `Wav2Vec2` model. \n\nThe biggest change is that we must now introduce a `state` parameter, which holds the audio that has been *transcribed so far*. This allows us to only the latest chunk of audio and simply append it to the audio we previously transcribed. 
\n\nWhen adding state to a Gradio demo, you need to do a total of 3 things:\n\n* Add a `state` parameter to the function\n* Return the updated `state` at the end of the function\n* Add the `\"state\"` components to the `inputs` and `outputs` in `Interface` \n\nHere's what the code looks like:\n\n```python\ndef transcribe(audio, state=\"\"):\n text = p(audio)[\"text\"]\n state += text + \" \"\n return state, state\n\n# Set the starting state to an empty string\n\ngr.Interface(\n fn=transcribe, \n inputs=[\n gr.Audio(source=\"microphone\", type=\"filepath\", streaming=True), \n \"state\" \n ],\n outputs=[\n \"textbox\",\n \"state\"\n ],\n live=True).launch()\n```\n\nNotice that we've also made one other change, which is that we've set `live=True`. This keeps the Gradio interface running constantly, so it automatically transcribes audio without the user having to repeatedly hit the submit button.\n\nLet's see how it does (try below or [in a new tab](https://huggingface.co/spaces/abidlabs/streaming-asr))!\n\n\n\n\nOne thing that you may notice is that the transcription quality has dropped since the chunks of audio are so small, they lack the context to properly be transcribed. A \"hacky\" fix to this is to simply increase the runtime of the `transcribe()` function so that longer audio chunks are processed. We can do this by adding a `time.sleep()` inside the function, as shown below (we'll see a proper fix next) \n\n```python\nfrom transformers import pipeline\nimport gradio as gr\nimport time\n\np = pipeline(\"automatic-speech-recognition\")\n\ndef transcribe(audio, state=\"\"):\n time.sleep(2)\n text = p(audio)[\"text\"]\n state += text + \" \"\n return state, state\n\ngr.Interface(\n fn=transcribe, \n inputs=[\n gr.Audio(source=\"microphone\", type=\"filepath\", streaming=True), \n \"state\"\n ],\n outputs=[\n \"textbox\",\n \"state\"\n ],\n live=True).launch()\n```\n\nTry the demo below to see the difference (or [open in a new tab](https://huggingface.co/spaces/abidlabs/streaming-asr-paused))!\n\n\n\n\n## 4. Create a Streaming ASR Demo with DeepSpeech\n\nYou're not restricted to ASR models from the `transformers` library -- you can use your own models or models from other libraries. The `DeepSpeech` library contains models that are specifically designed to handle streaming audio data. These models perform really well with streaming data as they are able to account for previous chunks of audio data when making predictions.\n\nGoing through the DeepSpeech library is beyond the scope of this Guide (check out their [excellent documentation here](https://deepspeech.readthedocs.io/en/r0.9/)), but you can use Gradio very similarly with a DeepSpeech ASR model as with a Transformers ASR model. 
\n\nHere's a complete example (on Linux):\n\nFirst install the DeepSpeech library and download the pretrained models from the terminal:\n\n```bash\nwget https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-models.pbmm\nwget https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-models.scorer\napt install libasound2-dev portaudio19-dev libportaudio2 libportaudiocpp0 ffmpeg\npip install deepspeech==0.8.2\n```\n\nThen, create a similar `transcribe()` function as before:\n\n```python\nfrom deepspeech import Model\nimport numpy as np\n\nmodel_file_path = \"deepspeech-0.8.2-models.pbmm\"\nlm_file_path = \"deepspeech-0.8.2-models.scorer\"\nbeam_width = 100\nlm_alpha = 0.93\nlm_beta = 1.18\n\nmodel = Model(model_file_path)\nmodel.enableExternalScorer(lm_file_path)\nmodel.setScorerAlphaBeta(lm_alpha, lm_beta)\nmodel.setBeamWidth(beam_width)\n\n\ndef reformat_freq(sr, y):\n if sr not in (\n 48000,\n 16000,\n ): # Deepspeech only supports 16k, (we convert 48k -> 16k)\n raise ValueError(\"Unsupported rate\", sr)\n if sr == 48000:\n y = (\n ((y / max(np.max(y), 1)) * 32767)\n .reshape((-1, 3))\n .mean(axis=1)\n .astype(\"int16\")\n )\n sr = 16000\n return sr, y\n\n\ndef transcribe(speech, stream):\n _, y = reformat_freq(*speech)\n if stream is None:\n stream = model.createStream()\n stream.feedAudioContent(y)\n text = stream.intermediateDecode()\n return text, stream\n\n```\n\nThen, create a Gradio Interface as before (the only difference being that the return type should be `numpy` instead of a `filepath` to be compatible with the DeepSpeech models)\n\n```python\nimport gradio as gr\n\ngr.Interface(\n fn=transcribe, \n inputs=[\n gr.Audio(source=\"microphone\", type=\"numpy\"), \n \"state\" \n ], \n outputs= [\n \"text\", \n \"state\"\n ], \n live=True).launch()\n```\n\nRunning all of this should allow you to deploy your realtime ASR model with a nice GUI. Try it out and see how well it works for you.\n\n--------------------------------------------\n\n\nAnd you're done! That's all the code you need to build a web-based GUI for your ASR model. \n\nFun tip: you can share your ASR model instantly with others simply by setting `share=True` in `launch()`. \n\n\n", "html": "

Real Time Speech Recognition

\n\n

Introduction

\n\n

Automatic speech recognition (ASR), the conversion of spoken speech to text, is a very important and thriving area of machine learning. ASR algorithms run on practically every smartphone, and are becoming increasingly embedded in professional workflows, such as digital assistants for nurses and doctors. Because ASR algorithms are designed to be used directly by customers and end users, it is important to validate that they are behaving as expected when confronted with a wide variety of speech patterns (different accents, pitches, and background audio conditions).

\n\n

Using gradio, you can easily build a demo of your ASR model and share that with a testing team, or test it yourself by speaking through the microphone on your device.

\n\n

This tutorial will show how to take a pretrained speech-to-text model and deploy it with a Gradio interface. We will start with a full-context model, in which the user speaks the entire audio before the prediction runs. Then we will adapt the demo to make it streaming, meaning that the audio model will convert speech as you speak. The streaming demo that we create will look something like this (try it below or in a new tab!):

\n\n\n\n

Real-time ASR is inherently stateful, meaning that the model's predictions change depending on what words the user previously spoke. So, in this tutorial, we will also cover how to use state with Gradio demos.

\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed. You will also need a pretrained speech recognition model. In this tutorial, we will build demos from 2 ASR libraries:

\n\n
    \n
  • Transformers (for this, pip install transformers and pip install torch)
  • \n
  • DeepSpeech (pip install deepspeech==0.8.2)
  • \n
\n\n

Make sure you have at least one of these installed so that you can follow along with the tutorial. You will also need ffmpeg installed on your system, if you do not already have it, to process files from the microphone.

\n\n
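As an optional sanity check, here is a minimal sketch, using only the Python standard library, that confirms these dependencies are importable and that ffmpeg is on your PATH:

```python
import importlib.util
import shutil

# Check which of the optional ASR libraries (and gradio itself) are importable.
for pkg in ("gradio", "transformers", "torch", "deepspeech"):
    found = importlib.util.find_spec(pkg) is not None
    print(f"{pkg}: {'found' if found else 'not installed'}")

# ffmpeg is a system binary, so check the PATH rather than importing it.
print("ffmpeg:", "found" if shutil.which("ffmpeg") else "not installed")
```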

Here's how to build a real time speech recognition (ASR) app:

\n\n
    \n
  1. Set up the Transformers ASR Model
  2. \n
  3. Create a Full-Context ASR Demo with Transformers
  4. \n
  5. Create a Streaming ASR Demo with Transformers
  6. \n
  7. Create a Streaming ASR Demo with DeepSpeech
  8. \n
\n\n

1. Set up the Transformers ASR Model

\n\n

First, you will need an ASR model: either one that you have trained yourself, or a pretrained model that you download. In this tutorial, we will start with a pretrained ASR model from the Hugging Face Hub, Wav2Vec2.

\n\n

Here is the code to load Wav2Vec2 from Hugging Face transformers.

\n\n
from transformers import pipeline\n\np = pipeline(\"automatic-speech-recognition\")\n
\n\n

That's it! By default, the automatic speech recognition model pipeline loads Facebook's facebook/wav2vec2-base-960h model.

\n\n
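The default is convenient, but you can also pin the checkpoint explicitly. A minimal sketch, assuming the standard transformers pipeline API and a local audio file (the filename below is only a placeholder):

```python
from transformers import pipeline

# Explicitly select the checkpoint instead of relying on the pipeline default.
# Another ASR checkpoint from the Hub could be substituted here.
p = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")

# The pipeline accepts a path to an audio file and returns a dict with a "text" key.
print(p("sample.wav")["text"])  # "sample.wav" is a placeholder filename
```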

2. Create a Full-Context ASR Demo with Transformers

\n\n

We will start by creating a full-context ASR demo, in which the user speaks the full audio before using the ASR model to run inference. This is very easy with Gradio -- we simply create a function around the pipeline object above.

\n\n

We will use gradio's built-in Audio component, configured to take input from the user's microphone and return a filepath for the recorded audio. The output component will be a plain Textbox.

\n\n
import gradio as gr\n\ndef transcribe(audio):\n    text = p(audio)[\"text\"]\n    return text\n\ngr.Interface(\n    fn=transcribe, \n    inputs=gr.Audio(source=\"microphone\", type=\"filepath\"), \n    outputs=\"text\").launch()\n
\n\n

So what's happening here? The transcribe function takes a single parameter, audio, which is a filepath to the audio file that the user has recorded. The pipeline object expects a filepath and converts it to text, which is returned to the frontend and displayed in a textbox.

\n\n

Let's see it in action! (Record a short audio clip and then click submit, or open in a new tab):

\n\n\n\n

3. Create a Streaming ASR Demo with Transformers

\n\n

Ok great! We've built an ASR demo that works well for short audio clips. However, if you are recording longer audio clips, you probably want a streaming interface, one that transcribes audio as the user speaks instead of just all at once at the end.

\n\n

The good news is that it's not too difficult to adapt the demo we just made to make it streaming, using the same Wav2Vec2 model.

\n\n

The biggest change is that we must now introduce a state parameter, which holds the text that has been transcribed so far. This allows us to transcribe only the latest chunk of audio and simply append the result to the text we transcribed previously.

\n\n

When adding state to a Gradio demo, you need to do a total of 3 things:

\n\n
    \n
  • Add a state parameter to the function
  • \n
  • Return the updated state at the end of the function
  • \n
  • Add the \"state\" components to the inputs and outputs in Interface
  • \n
\n\n

Here's what the code looks like:

\n\n
def transcribe(audio, state=\"\"):\n    text = p(audio)[\"text\"]\n    state += text + \" \"\n    return state, state\n\n# Set the starting state to an empty string\n\ngr.Interface(\n    fn=transcribe, \n    inputs=[\n        gr.Audio(source=\"microphone\", type=\"filepath\", streaming=True), \n        \"state\" \n    ],\n    outputs=[\n        \"textbox\",\n        \"state\"\n    ],\n    live=True).launch()\n
\n\n

Notice that we've also made one other change, which is that we've set live=True. This keeps the Gradio interface running constantly, so it automatically transcribes audio without the user having to repeatedly hit the submit button.

\n\n

Let's see how it does (try below or in a new tab)!

\n\n\n\n

One thing that you may notice is that the transcription quality has dropped: because the chunks of audio are so small, they lack the context to be transcribed properly. A \"hacky\" fix is to simply increase the runtime of the transcribe() function so that longer audio chunks are processed. We can do this by adding a time.sleep() inside the function, as shown below (we'll see a proper fix next).

\n\n
from transformers import pipeline\nimport gradio as gr\nimport time\n\np = pipeline(\"automatic-speech-recognition\")\n\ndef transcribe(audio, state=\"\"):\n    time.sleep(2)\n    text = p(audio)[\"text\"]\n    state += text + \" \"\n    return state, state\n\ngr.Interface(\n    fn=transcribe, \n    inputs=[\n        gr.Audio(source=\"microphone\", type=\"filepath\", streaming=True), \n        \"state\"\n    ],\n    outputs=[\n        \"textbox\",\n        \"state\"\n    ],\n    live=True).launch()\n
\n\n

Try the demo below to see the difference (or open in a new tab)!

\n\n\n\n

4. Create a Streaming ASR Demo with DeepSpeech

\n\n

You're not restricted to ASR models from the transformers library -- you can use your own models or models from other libraries. The DeepSpeech library contains models that are specifically designed to handle streaming audio data. These models perform really well with streaming data as they are able to account for previous chunks of audio data when making predictions.

\n\n

Going through the DeepSpeech library is beyond the scope of this Guide (check out their excellent documentation here), but you can use Gradio very similarly with a DeepSpeech ASR model as with a Transformers ASR model.

\n\n

Here's a complete example (on Linux):

\n\n

First install the DeepSpeech library and download the pretrained models from the terminal:

\n\n
wget https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-models.pbmm\nwget https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-models.scorer\napt install libasound2-dev portaudio19-dev libportaudio2 libportaudiocpp0 ffmpeg\npip install deepspeech==0.8.2\n
\n\n

Then, create a similar transcribe() function as before:

\n\n
from deepspeech import Model\nimport numpy as np\n\nmodel_file_path = \"deepspeech-0.8.2-models.pbmm\"\nlm_file_path = \"deepspeech-0.8.2-models.scorer\"\nbeam_width = 100\nlm_alpha = 0.93\nlm_beta = 1.18\n\nmodel = Model(model_file_path)\nmodel.enableExternalScorer(lm_file_path)\nmodel.setScorerAlphaBeta(lm_alpha, lm_beta)\nmodel.setBeamWidth(beam_width)\n\n\ndef reformat_freq(sr, y):\n    if sr not in (\n        48000,\n        16000,\n    ):  # Deepspeech only supports 16k, (we convert 48k -> 16k)\n        raise ValueError(\"Unsupported rate\", sr)\n    if sr == 48000:\n        y = (\n            ((y / max(np.max(y), 1)) * 32767)\n            .reshape((-1, 3))\n            .mean(axis=1)\n            .astype(\"int16\")\n        )\n        sr = 16000\n    return sr, y\n\n\ndef transcribe(speech, stream):\n    _, y = reformat_freq(*speech)\n    if stream is None:\n        stream = model.createStream()\n    stream.feedAudioContent(y)\n    text = stream.intermediateDecode()\n    return text, stream\n\n
\n\n

Then, create a Gradio Interface as before (the only difference being that the Audio input's type should be numpy instead of filepath, so that it is compatible with the DeepSpeech model).

\n\n
import gradio as gr\n\ngr.Interface(\n    fn=transcribe, \n    inputs=[\n        gr.Audio(source=\"microphone\", type=\"numpy\"), \n        \"state\" \n    ], \n    outputs= [\n        \"text\", \n        \"state\"\n    ], \n    live=True).launch()\n
\n\n

Running all of this should allow you to deploy your real-time ASR model with a nice GUI. Try it out and see how well it works for you.

\n\n
\n\n

And you're done! That's all the code you need to build a web-based GUI for your ASR model.

\n\n

Fun tip: you can share your ASR model instantly with others simply by setting share=True in launch().
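For example, a minimal sketch that reuses the DeepSpeech interface above (transcribe is the function defined earlier) and adds a temporary public link:

```python
import gradio as gr

# Same streaming interface as in the DeepSpeech section, with share=True so
# that launch() also creates a temporary public URL you can send to others.
gr.Interface(
    fn=transcribe,
    inputs=[gr.Audio(source="microphone", type="numpy"), "state"],
    outputs=["text", "state"],
    live=True,
).launch(share=True)
```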

\n", "tags": ["ASR", "SPEECH", "STREAMING"], "spaces": ["https://huggingface.co/spaces/abidlabs/streaming-asr-paused", "https://huggingface.co/spaces/abidlabs/full-context-asr"], "url": "/guides/real-time-speech-recognition/", "contributor": null}], "preprocessing": "No preprocessing is performed", "postprocessing": "No postprocessing is performed", "parent": "gradio", "prev_obj": "Slider", "next_obj": "Textbox"}, "textbox": {"class": null, "name": "Textbox", "description": "Creates a textarea for user to enter string input or display string output.
", "tags": {"preprocessing": "passes textarea value as a {str} into the function.", "postprocessing": "expects a {str} returned from function and sets textarea value to it.", "examples-format": "a {str} representing the textbox input.", "demos": "hello_world, diff_texts, sentence_builder", "guides": "creating-a-chatbot, real-time-speech-recognition"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "value", "annotation": "str | Callable | None", "doc": "default text to provide in textarea. If callable, the function will be called whenever the app loads to set the initial value of the component.", "default": "\"\""}, {"name": "lines", "annotation": "int", "doc": "minimum number of line rows to provide in textarea.", "default": "1"}, {"name": "max_lines", "annotation": "int", "doc": "maximum number of line rows to provide in textarea.", "default": "20"}, {"name": "placeholder", "annotation": "str | None", "doc": "placeholder hint to provide behind textarea.", "default": "None"}, {"name": "label", "annotation": "str | None", "doc": "component name in interface.", "default": "None"}, {"name": "info", "annotation": "str | None", "doc": "additional component description.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", "default": "None"}, {"name": "show_label", "annotation": "bool | None", "doc": "if True, will display label.", "default": "None"}, {"name": "container", "annotation": "bool", "doc": "If True, will place the component in a container - providing some extra padding around the border.", "default": "True"}, {"name": "scale", "annotation": "int | None", "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", "default": "None"}, {"name": "min_width", "annotation": "int", "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", "default": "160"}, {"name": "interactive", "annotation": "bool | None", "doc": "if True, will be rendered as an editable textbox; if False, editing will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.", "default": "None"}, {"name": "visible", "annotation": "bool", "doc": "If False, component will be hidden.", "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "autofocus", "annotation": "bool", "doc": "If True, will focus on the textbox when the page loads.", "default": "False"}, {"name": "elem_classes", "annotation": "list[str] | str | None", "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "type", "annotation": "Literal['text', 'password', 'email']", "doc": "The type of textbox. 
One of: 'text', 'password', 'email', Default is 'text'.", "default": "\"text\""}, {"name": "text_align", "annotation": "Literal['left', 'right'] | None", "doc": "How to align the text in the textbox, can be: \"left\", \"right\", or None (default). If None, the alignment is left if `rtl` is False, or right if `rtl` is True. Can only be changed if `type` is \"text\".", "default": "None"}, {"name": "rtl", "annotation": "bool", "doc": "If True and `type` is \"text\", sets the direction of the text to right-to-left (cursor appears on the left of the text). Default is False, which renders cursor on the right.", "default": "False"}, {"name": "show_copy_button", "annotation": "bool", "doc": "If True, includes a copy button to copy the text in the textbox. Only applies if show_label is True.", "default": "False"}], "returns": {"annotation": null}, "example": null, "fns": [{"fn": null, "name": "change", "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Textbox"}, {"fn": null, "name": "input", "description": "This listener is triggered when the user changes the value of the component. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Textbox"}, {"fn": null, "name": "submit", "description": "This listener is triggered when the user presses the Enter key while the component (e.g. a textbox) is focused. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. 
If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Textbox"}, {"fn": null, "name": "focus", "description": "This listener is triggered when the component is focused (e.g. when the user clicks inside a textbox). 
This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. 
Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Textbox"}, {"fn": null, "name": "blur", "description": "This listener is triggered when the component's is unfocused/blurred (e.g. when the user clicks outside of a textbox). This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Textbox"}, {"fn": null, "name": "select", "description": "Event listener for when the user selects text in the Textbox. Uses event data gradio.SelectData to carry `value` referring to selected substring, and `index` tuple referring to selected range endpoints. See EventData documentation on how to use this event data.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. 
The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Textbox"}], "string_shortcuts": [["Textbox", "textbox", "Uses default values"], ["TextArea", "textarea", "Uses lines=7"]], "demos": [["hello_world", "import gradio as gr\n\ndef greet(name):\n return \"Hello \" + name + \"!\"\n\ndemo = gr.Interface(fn=greet, inputs=\"text\", outputs=\"text\")\n \nif __name__ == \"__main__\":\n demo.launch() "], ["diff_texts", "from difflib import Differ\n\nimport gradio as gr\n\n\ndef diff_texts(text1, text2):\n d = Differ()\n return [\n (token[2:], token[0] if token[0] != \" \" else None)\n for token in d.compare(text1, text2)\n ]\n\n\ndemo = gr.Interface(\n diff_texts,\n [\n gr.Textbox(\n label=\"Text 1\",\n info=\"Initial text\",\n lines=3,\n value=\"The quick brown fox jumped over the lazy dogs.\",\n ),\n gr.Textbox(\n label=\"Text 2\",\n info=\"Text to compare\",\n lines=3,\n value=\"The fast brown fox jumps over lazy dogs.\",\n ),\n ],\n gr.HighlightedText(\n label=\"Diff\",\n combine_adjacent=True,\n show_legend=True,\n ).style(color_map={\"+\": \"red\", \"-\": \"green\"}),\n theme=gr.themes.Base()\n)\nif __name__ == \"__main__\":\n demo.launch()\n"], ["sentence_builder", "import gradio as gr\n\n\ndef sentence_builder(quantity, animal, countries, place, activity_list, morning):\n return f\"\"\"The {quantity} {animal}s from {\" and \".join(countries)} went to the {place} where they {\" and \".join(activity_list)} until the {\"morning\" if morning else \"night\"}\"\"\"\n\n\ndemo = gr.Interface(\n sentence_builder,\n [\n gr.Slider(2, 20, value=4, label=\"Count\", info=\"Choose between 2 and 20\"),\n gr.Dropdown(\n [\"cat\", \"dog\", \"bird\"], label=\"Animal\", info=\"Will add more animals later!\"\n ),\n gr.CheckboxGroup([\"USA\", \"Japan\", \"Pakistan\"], label=\"Countries\", info=\"Where are they from?\"),\n gr.Radio([\"park\", \"zoo\", \"road\"], label=\"Location\", info=\"Where did they go?\"),\n gr.Dropdown(\n [\"ran\", \"swam\", \"ate\", \"slept\"], 
value=[\"swam\", \"slept\"], multiselect=True, label=\"Activity\", info=\"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed auctor, nisl eget ultricies aliquam, nunc nisl aliquet nunc, eget aliquam nisl nunc vel nisl.\"\n ),\n gr.Checkbox(label=\"Morning\", info=\"Did they do it in the morning?\"),\n ],\n \"text\",\n examples=[\n [2, \"cat\", [\"Japan\", \"Pakistan\"], \"park\", [\"ate\", \"swam\"], True],\n [4, \"dog\", [\"Japan\"], \"zoo\", [\"ate\", \"swam\"], False],\n [10, \"bird\", [\"USA\", \"Pakistan\"], \"road\", [\"ran\"], False],\n [8, \"cat\", [\"Pakistan\"], \"zoo\", [\"ate\"], True],\n ]\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n"]], "guides": [{"name": "real-time-speech-recognition", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 40, "pretty_name": "Real Time Speech Recognition", "content": "# Real Time Speech Recognition \n\n\n\n\n## Introduction\n\nAutomatic speech recognition (ASR), the conversion of spoken speech to text, is a very important and thriving area of machine learning. ASR algorithms run on practically every smartphone, and are becoming increasingly embedded in professional workflows, such as digital assistants for nurses and doctors. Because ASR algorithms are designed to be used directly by customers and end users, it is important to validate that they are behaving as expected when confronted with a wide variety of speech patterns (different accents, pitches, and background audio conditions).\n\nUsing `gradio`, you can easily build a demo of your ASR model and share that with a testing team, or test it yourself by speaking through the microphone on your device.\n\nThis tutorial will show how to take a pretrained speech-to-text model and deploy it with a Gradio interface. We will start with a ***full-context*** model, in which the user speaks the entire audio before the prediction runs. Then we will adapt the demo to make it ***streaming***, meaning that the audio model will convert speech as you speak. The streaming demo that we create will look something like this (try it below or [in a new tab](https://huggingface.co/spaces/abidlabs/streaming-asr-paused)!):\n\n\n\nReal-time ASR is inherently *stateful*, meaning that the model's predictions change depending on what words the user previously spoke. So, in this tutorial, we will also cover how to use **state** with Gradio demos. \n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). You will also need a pretrained speech recognition model. In this tutorial, we will build demos from 2 ASR libraries:\n\n* Transformers (for this, `pip install transformers` and `pip install torch`) \n* DeepSpeech (`pip install deepspeech==0.8.2`)\n\nMake sure you have at least one of these installed so that you can follow along the tutorial. You will also need `ffmpeg` [installed on your system](https://www.ffmpeg.org/download.html), if you do not already have it, to process files from the microphone.\n\nHere's how to build a real time speech recognition (ASR) app: \n\n1. [Set up the Transformers ASR Model](#1-set-up-the-transformers-asr-model)\n2. [Create a Full-Context ASR Demo with Transformers](#2-create-a-full-context-asr-demo-with-transformers) \n3. [Create a Streaming ASR Demo with Transformers](#3-create-a-streaming-asr-demo-with-transformers)\n4. [Create a Streaming ASR Demo with DeepSpeech](#4-create-a-streaming-asr-demo-with-deep-speech)\n\n\n## 1. 
Set up the Transformers ASR Model\n\nFirst, you will need to have an ASR model that you have either trained yourself or you will need to download a pretrained model. In this tutorial, we will start by using a pretrained ASR model from the Hugging Face model, `Wav2Vec2`. \n\nHere is the code to load `Wav2Vec2` from Hugging Face `transformers`.\n\n```python\nfrom transformers import pipeline\n\np = pipeline(\"automatic-speech-recognition\")\n```\n\nThat's it! By default, the automatic speech recognition model pipeline loads Facebook's `facebook/wav2vec2-base-960h` model.\n\n## 2. Create a Full-Context ASR Demo with Transformers \n\nWe will start by creating a *full-context* ASR demo, in which the user speaks the full audio before using the ASR model to run inference. This is very easy with Gradio -- we simply create a function around the `pipeline` object above.\n\nWe will use `gradio`'s built in `Audio` component, configured to take input from the user's microphone and return a filepath for the recorded audio. The output component will be a plain `Textbox`.\n\n```python\nimport gradio as gr\n\ndef transcribe(audio):\n text = p(audio)[\"text\"]\n return text\n\ngr.Interface(\n fn=transcribe, \n inputs=gr.Audio(source=\"microphone\", type=\"filepath\"), \n outputs=\"text\").launch()\n```\n\nSo what's happening here? The `transcribe` function takes a single parameter, `audio`, which is a filepath to the audio file that the user has recorded. The `pipeline` object expects a filepath and converts it to text, which is returned to the frontend and displayed in a textbox. \n\nLet's see it in action! (Record a short audio clip and then click submit, or [open in a new tab](https://huggingface.co/spaces/abidlabs/full-context-asr)):\n\n\n\n## 3. Create a Streaming ASR Demo with Transformers\n\nOk great! We've built an ASR model that works well for short audio clips. However, if you are recording longer audio clips, you probably want a *streaming* interface, one that transcribes audio as the user speaks instead of just all-at-once at the end.\n\nThe good news is that it's not too difficult to adapt the demo we just made to make it streaming, using the same `Wav2Vec2` model. \n\nThe biggest change is that we must now introduce a `state` parameter, which holds the audio that has been *transcribed so far*. This allows us to only the latest chunk of audio and simply append it to the audio we previously transcribed. \n\nWhen adding state to a Gradio demo, you need to do a total of 3 things:\n\n* Add a `state` parameter to the function\n* Return the updated `state` at the end of the function\n* Add the `\"state\"` components to the `inputs` and `outputs` in `Interface` \n\nHere's what the code looks like:\n\n```python\ndef transcribe(audio, state=\"\"):\n text = p(audio)[\"text\"]\n state += text + \" \"\n return state, state\n\n# Set the starting state to an empty string\n\ngr.Interface(\n fn=transcribe, \n inputs=[\n gr.Audio(source=\"microphone\", type=\"filepath\", streaming=True), \n \"state\" \n ],\n outputs=[\n \"textbox\",\n \"state\"\n ],\n live=True).launch()\n```\n\nNotice that we've also made one other change, which is that we've set `live=True`. 
This keeps the Gradio interface running constantly, so it automatically transcribes audio without the user having to repeatedly hit the submit button.\n\nLet's see how it does (try below or [in a new tab](https://huggingface.co/spaces/abidlabs/streaming-asr))!\n\n\n\n\nOne thing that you may notice is that the transcription quality has dropped since the chunks of audio are so small, they lack the context to properly be transcribed. A \"hacky\" fix to this is to simply increase the runtime of the `transcribe()` function so that longer audio chunks are processed. We can do this by adding a `time.sleep()` inside the function, as shown below (we'll see a proper fix next) \n\n```python\nfrom transformers import pipeline\nimport gradio as gr\nimport time\n\np = pipeline(\"automatic-speech-recognition\")\n\ndef transcribe(audio, state=\"\"):\n time.sleep(2)\n text = p(audio)[\"text\"]\n state += text + \" \"\n return state, state\n\ngr.Interface(\n fn=transcribe, \n inputs=[\n gr.Audio(source=\"microphone\", type=\"filepath\", streaming=True), \n \"state\"\n ],\n outputs=[\n \"textbox\",\n \"state\"\n ],\n live=True).launch()\n```\n\nTry the demo below to see the difference (or [open in a new tab](https://huggingface.co/spaces/abidlabs/streaming-asr-paused))!\n\n\n\n\n## 4. Create a Streaming ASR Demo with DeepSpeech\n\nYou're not restricted to ASR models from the `transformers` library -- you can use your own models or models from other libraries. The `DeepSpeech` library contains models that are specifically designed to handle streaming audio data. These models perform really well with streaming data as they are able to account for previous chunks of audio data when making predictions.\n\nGoing through the DeepSpeech library is beyond the scope of this Guide (check out their [excellent documentation here](https://deepspeech.readthedocs.io/en/r0.9/)), but you can use Gradio very similarly with a DeepSpeech ASR model as with a Transformers ASR model. 
\n\nHere's a complete example (on Linux):\n\nFirst install the DeepSpeech library and download the pretrained models from the terminal:\n\n```bash\nwget https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-models.pbmm\nwget https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-models.scorer\napt install libasound2-dev portaudio19-dev libportaudio2 libportaudiocpp0 ffmpeg\npip install deepspeech==0.8.2\n```\n\nThen, create a similar `transcribe()` function as before:\n\n```python\nfrom deepspeech import Model\nimport numpy as np\n\nmodel_file_path = \"deepspeech-0.8.2-models.pbmm\"\nlm_file_path = \"deepspeech-0.8.2-models.scorer\"\nbeam_width = 100\nlm_alpha = 0.93\nlm_beta = 1.18\n\nmodel = Model(model_file_path)\nmodel.enableExternalScorer(lm_file_path)\nmodel.setScorerAlphaBeta(lm_alpha, lm_beta)\nmodel.setBeamWidth(beam_width)\n\n\ndef reformat_freq(sr, y):\n    if sr not in (\n        48000,\n        16000,\n    ):  # DeepSpeech only supports 16k (we convert 48k -> 16k)\n        raise ValueError(\"Unsupported rate\", sr)\n    if sr == 48000:\n        y = (\n            ((y / max(np.max(y), 1)) * 32767)\n            .reshape((-1, 3))\n            .mean(axis=1)\n            .astype(\"int16\")\n        )\n        sr = 16000\n    return sr, y\n\n\ndef transcribe(speech, stream):\n    _, y = reformat_freq(*speech)\n    if stream is None:\n        # First chunk: create a DeepSpeech stream that acts as the session state\n        stream = model.createStream()\n    stream.feedAudioContent(y)\n    text = stream.intermediateDecode()\n    return text, stream\n\n```\n\nThen, create a Gradio Interface as before (the only difference being that the input `type` should be `\"numpy\"` instead of `\"filepath\"` to be compatible with the DeepSpeech model):\n\n```python\nimport gradio as gr\n\ngr.Interface(\n    fn=transcribe, \n    inputs=[\n        gr.Audio(source=\"microphone\", type=\"numpy\"), \n        \"state\" \n    ], \n    outputs=[\n        \"text\", \n        \"state\"\n    ], \n    live=True).launch()\n```\n\nRunning all of this should allow you to deploy your real-time ASR model with a nice GUI. Try it out and see how well it works for you.\n\n--------------------------------------------\n\n\nAnd you're done! That's all the code you need to build a web-based GUI for your ASR model. \n\nFun tip: you can share your ASR model instantly with others simply by setting `share=True` in `launch()`. \n\n\n", "html": "

Real Time Speech Recognition

\n\n

Introduction

\n\n

Automatic speech recognition (ASR), the conversion of spoken audio to text, is a very important and thriving area of machine learning. ASR algorithms run on practically every smartphone, and are becoming increasingly embedded in professional workflows, such as digital assistants for nurses and doctors. Because ASR algorithms are designed to be used directly by customers and end users, it is important to validate that they are behaving as expected when confronted with a wide variety of speech patterns (different accents, pitches, and background audio conditions).

\n\n

Using gradio, you can easily build a demo of your ASR model and share it with a testing team, or test it yourself by speaking through the microphone on your device.

\n\n

This tutorial will show how to take a pretrained speech-to-text model and deploy it with a Gradio interface. We will start with a full-context model, in which the user speaks the entire audio before the prediction runs. Then we will adapt the demo to make it streaming, meaning that the model will convert speech to text as you speak. The streaming demo that we create will look something like this (try it below or in a new tab!):

\n\n\n\n

Real-time ASR is inherently stateful, meaning that the model's predictions change depending on what words the user previously spoke. So, in this tutorial, we will also cover how to use state with Gradio demos.

\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed. You will also need a pretrained speech recognition model. In this tutorial, we will build demos from 2 ASR libraries:

\n\n
    \n
  • Transformers (for this, pip install transformers and pip install torch)
  • DeepSpeech (pip install deepspeech==0.8.2)
\n\n

Make sure you have at least one of these installed so that you can follow along with the tutorial. You will also need ffmpeg installed on your system, if you do not already have it, to process files from the microphone.

\n\n

Here's how to build a real-time speech recognition (ASR) app:

\n\n
    \n
  1. Set up the Transformers ASR Model
  2. Create a Full-Context ASR Demo with Transformers
  3. Create a Streaming ASR Demo with Transformers
  4. Create a Streaming ASR Demo with DeepSpeech
\n\n

1. Set up the Transformers ASR Model

\n\n

First, you will need an ASR model that you have either trained yourself or downloaded as a pretrained model. In this tutorial, we will start by using a pretrained ASR model from Hugging Face, Wav2Vec2.

\n\n

Here is the code to load Wav2Vec2 from Hugging Face transformers.

\n\n
from transformers import pipeline\n\np = pipeline(\"automatic-speech-recognition\")\n
\n\n

That's it! By default, the automatic speech recognition model pipeline loads Facebook's facebook/wav2vec2-base-960h model.
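\n\n
If you want to be explicit about which checkpoint is used (or swap in a different one), you can pass the model name to the pipeline yourself. Here is a minimal sketch that is equivalent to the default above, with the checkpoint spelled out explicitly:
\n\n
from transformers import pipeline\n\n# Equivalent to the default above, but with the checkpoint named explicitly\np = pipeline(\"automatic-speech-recognition\", model=\"facebook/wav2vec2-base-960h\")\n
\n\n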

\n\n

2. Create a Full-Context ASR Demo with Transformers

\n\n

We will start by creating a full-context ASR demo, in which the user speaks the full audio before using the ASR model to run inference. This is very easy with Gradio -- we simply create a function around the pipeline object above.

\n\n

We will use gradio's built-in Audio component, configured to take input from the user's microphone and return a filepath for the recorded audio. The output component will be a plain Textbox.

\n\n
import gradio as gr\n\ndef transcribe(audio):\n    text = p(audio)[\"text\"]\n    return text\n\ngr.Interface(\n    fn=transcribe, \n    inputs=gr.Audio(source=\"microphone\", type=\"filepath\"), \n    outputs=\"text\").launch()\n
\n\n

So what's happening here? The transcribe function takes a single parameter, audio, which is a filepath to the audio file that the user has recorded. The pipeline object expects a filepath and converts it to text, which is returned to the frontend and displayed in a textbox.

\n\n

Let's see it in action! (Record a short audio clip and then click submit, or open in a new tab):

\n\n\n\n

3. Create a Streaming ASR Demo with Transformers

\n\n

Ok great! We've built an ASR model that works well for short audio clips. However, if you are recording longer audio clips, you probably want a streaming interface, one that transcribes audio as the user speaks instead of just all-at-once at the end.

\n\n

The good news is that it's not too difficult to adapt the demo we just made to make it streaming, using the same Wav2Vec2 model.

\n\n

The biggest change is that we must now introduce a state parameter, which holds the text that has been transcribed so far. This allows us to transcribe only the latest chunk of audio and simply append it to the text we previously transcribed.

\n\n

When adding state to a Gradio demo, you need to do a total of 3 things:

\n\n
    \n
  • Add a state parameter to the function
  • Return the updated state at the end of the function
  • Add the \"state\" components to the inputs and outputs in Interface
\n\n

Here's what the code looks like:

\n\n
def transcribe(audio, state=\"\"):\n    text = p(audio)[\"text\"]\n    state += text + \" \"\n    return state, state\n\n# Set the starting state to an empty string\n\ngr.Interface(\n    fn=transcribe, \n    inputs=[\n        gr.Audio(source=\"microphone\", type=\"filepath\", streaming=True), \n        \"state\" \n    ],\n    outputs=[\n        \"textbox\",\n        \"state\"\n    ],\n    live=True).launch()\n
\n\n

Notice that we've also made one other change, which is that we've set live=True. This keeps the Gradio interface running constantly, so it automatically transcribes audio without the user having to repeatedly hit the submit button.

\n\n

Let's see how it does (try below or in a new tab)!

\n\n\n\n

One thing that you may notice is that the transcription quality has dropped: because the chunks of audio are so small, they lack the context needed to be transcribed properly. A \"hacky\" fix to this is to simply increase the runtime of the transcribe() function so that longer audio chunks are processed. We can do this by adding a time.sleep() inside the function, as shown below (we'll see a proper fix next).

\n\n
from transformers import pipeline\nimport gradio as gr\nimport time\n\np = pipeline(\"automatic-speech-recognition\")\n\ndef transcribe(audio, state=\"\"):\n    time.sleep(2)\n    text = p(audio)[\"text\"]\n    state += text + \" \"\n    return state, state\n\ngr.Interface(\n    fn=transcribe, \n    inputs=[\n        gr.Audio(source=\"microphone\", type=\"filepath\", streaming=True), \n        \"state\"\n    ],\n    outputs=[\n        \"textbox\",\n        \"state\"\n    ],\n    live=True).launch()\n
\n\n

Try the demo below to see the difference (or open in a new tab)!

\n\n\n\n

4. Create a Streaming ASR Demo with DeepSpeech

\n\n

You're not restricted to ASR models from the transformers library -- you can use your own models or models from other libraries. The DeepSpeech library contains models that are specifically designed to handle streaming audio data. These models perform really well with streaming data as they are able to account for previous chunks of audio data when making predictions.

\n\n

Going through the DeepSpeech library is beyond the scope of this Guide (check out their excellent documentation here), but you can use Gradio very similarly with a DeepSpeech ASR model as with a Transformers ASR model.

\n\n

Here's a complete example (on Linux):

\n\n

First install the DeepSpeech library and download the pretrained models from the terminal:

\n\n
wget https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-models.pbmm\nwget https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-models.scorer\napt install libasound2-dev portaudio19-dev libportaudio2 libportaudiocpp0 ffmpeg\npip install deepspeech==0.8.2\n
\n\n

Then, create a similar transcribe() function as before:

\n\n
from deepspeech import Model\nimport numpy as np\n\nmodel_file_path = \"deepspeech-0.8.2-models.pbmm\"\nlm_file_path = \"deepspeech-0.8.2-models.scorer\"\nbeam_width = 100\nlm_alpha = 0.93\nlm_beta = 1.18\n\nmodel = Model(model_file_path)\nmodel.enableExternalScorer(lm_file_path)\nmodel.setScorerAlphaBeta(lm_alpha, lm_beta)\nmodel.setBeamWidth(beam_width)\n\n\ndef reformat_freq(sr, y):\n    if sr not in (\n        48000,\n        16000,\n    ):  # Deepspeech only supports 16k, (we convert 48k -> 16k)\n        raise ValueError(\"Unsupported rate\", sr)\n    if sr == 48000:\n        y = (\n            ((y / max(np.max(y), 1)) * 32767)\n            .reshape((-1, 3))\n            .mean(axis=1)\n            .astype(\"int16\")\n        )\n        sr = 16000\n    return sr, y\n\n\ndef transcribe(speech, stream):\n    _, y = reformat_freq(*speech)\n    if stream is None:\n        stream = model.createStream()\n    stream.feedAudioContent(y)\n    text = stream.intermediateDecode()\n    return text, stream\n\n
\n\n

Then, create a Gradio Interface as before (the only difference being that the input type should be numpy instead of filepath, to be compatible with the DeepSpeech model).

\n\n
import gradio as gr\n\ngr.Interface(\n    fn=transcribe, \n    inputs=[\n        gr.Audio(source=\"microphone\", type=\"numpy\"), \n        \"state\" \n    ], \n    outputs= [\n        \"text\", \n        \"state\"\n    ], \n    live=True).launch()\n
\n\n

Running all of this should allow you to deploy your real-time ASR model with a nice GUI. Try it out and see how well it works for you.

\n\n
\n\n

And you're done! That's all the code you need to build a web-based GUI for your ASR model.

\n\n

Fun tip: you can share your ASR model instantly with others simply by setting share=True in launch().
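\n\n
For example, here is a minimal sketch of the full-context demo from step 2 with sharing enabled (the variable name demo is just for illustration): passing share=True to launch() asks Gradio to create a temporary public URL, in addition to the local one, that you can send to anyone you want to try the demo.
\n\n
from transformers import pipeline\nimport gradio as gr\n\np = pipeline(\"automatic-speech-recognition\")\n\ndef transcribe(audio):\n    return p(audio)[\"text\"]\n\ndemo = gr.Interface(\n    fn=transcribe, \n    inputs=gr.Audio(source=\"microphone\", type=\"filepath\"), \n    outputs=\"text\")\n\n# share=True creates a temporary public link for the running demo\ndemo.launch(share=True)\n
\n\n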

\n", "tags": ["ASR", "SPEECH", "STREAMING"], "spaces": ["https://huggingface.co/spaces/abidlabs/streaming-asr-paused", "https://huggingface.co/spaces/abidlabs/full-context-asr"], "url": "/guides/real-time-speech-recognition/", "contributor": null}], "preprocessing": "passes textarea value as a str into the function.", "postprocessing": "expects a str returned from function and sets textarea value to it.", "examples-format": "a str representing the textbox input.", "parent": "gradio", "prev_obj": "State", "next_obj": "Timeseries"}, "timeseries": {"class": null, "name": "Timeseries", "description": "Creates a component that can be used to upload/preview timeseries csv files or display a dataframe consisting of a time series graphically.", "tags": {"preprocessing": "passes the uploaded timeseries data as a {pandas.DataFrame} into the function", "postprocessing": "expects a {pandas.DataFrame} or {str} path to a csv to be returned, which is then displayed as a timeseries graph", "examples-format": "a {str} filepath of csv data with time series data.", "demos": "fraud_detector"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "value", "annotation": "str | Callable | None", "doc": "File path for the timeseries csv file. If callable, the function will be called whenever the app loads to set the initial value of the component.", "default": "None"}, {"name": "x", "annotation": "str | None", "doc": "Column name of x (time) series. None if csv has no headers, in which case first column is x series.", "default": "None"}, {"name": "y", "annotation": "str | list[str] | None", "doc": "Column name of y series, or list of column names if multiple series. None if csv has no headers, in which case every column after first is a y series.", "default": "None"}, {"name": "colors", "annotation": "list[str] | None", "doc": "an ordered list of colors to use for each line plot", "default": "None"}, {"name": "label", "annotation": "str | None", "doc": "component name in interface.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", "default": "None"}, {"name": "show_label", "annotation": "bool | None", "doc": "if True, will display label.", "default": "None"}, {"name": "container", "annotation": "bool", "doc": "If True, will place the component in a container - providing some extra padding around the border.", "default": "True"}, {"name": "scale", "annotation": "int | None", "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", "default": "None"}, {"name": "min_width", "annotation": "int", "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", "default": "160"}, {"name": "interactive", "annotation": "bool | None", "doc": "if True, will allow users to upload a timeseries csv; if False, can only be used to display timeseries data. 
If not provided, this is inferred based on whether the component is used as an input or output.", "default": "None"}, {"name": "visible", "annotation": "bool", "doc": "If False, component will be hidden.", "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "elem_classes", "annotation": "list[str] | str | None", "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}], "returns": {"annotation": null}, "example": null, "fns": [{"fn": null, "name": "change", "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Timeseries"}], "string_shortcuts": [["Timeseries", "timeseries", "Uses default values"]], "demos": [["fraud_detector", "import random\nimport os\nimport gradio as gr\n\n\ndef fraud_detector(card_activity, categories, sensitivity):\n activity_range = random.randint(0, 100)\n drop_columns = [\n column for column in [\"retail\", \"food\", \"other\"] if column not in categories\n ]\n if len(drop_columns):\n card_activity.drop(columns=drop_columns, inplace=True)\n return (\n card_activity,\n card_activity,\n {\"fraud\": activity_range / 100.0, \"not fraud\": 1 - activity_range / 100.0},\n )\n\n\ndemo = gr.Interface(\n fraud_detector,\n [\n gr.Timeseries(x=\"time\", y=[\"retail\", \"food\", \"other\"]),\n gr.CheckboxGroup(\n [\"retail\", \"food\", \"other\"], value=[\"retail\", \"food\", \"other\"]\n ),\n gr.Slider(1, 3),\n ],\n [\n \"dataframe\",\n gr.Timeseries(x=\"time\", y=[\"retail\", \"food\", \"other\"]),\n gr.Label(label=\"Fraud Level\"),\n ],\n examples=[\n [os.path.join(os.path.dirname(__file__), \"fraud.csv\"), [\"retail\", \"food\", \"other\"], 1.0],\n ],\n)\nif __name__ == \"__main__\":\n demo.launch()\n"]], "preprocessing": "passes the uploaded timeseries data as a pandas.DataFrame into the function", "postprocessing": "expects a pandas.DataFrame or str path to a csv to be returned, which is then displayed as a timeseries graph", "examples-format": "a str filepath of csv data with time series data.", "parent": "gradio", "prev_obj": "Textbox", "next_obj": "UploadButton"}, "uploadbutton": {"class": null, "name": "UploadButton", "description": "Used to create an upload button, when cicked allows a user to upload files that satisfy the specified file type or generic files (if file_type not set).", "tags": {"preprocessing": "passes the uploaded file as a {file-object} or {List[file-object]} depending on `file_count` (or a {bytes}/{List{bytes}} depending on `type`)", "postprocessing": "expects function to return a {str} path to a file, or {List[str]} consisting of 
paths to files.", "examples-format": "a {str} path to a local file that populates the component.", "demos": "upload_button"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "label", "annotation": "str", "doc": "Text to display on the button. Defaults to \"Upload a File\".", "default": "\"Upload a File\""}, {"name": "value", "annotation": "str | list[str] | Callable | None", "doc": "File or list of files to upload by default.", "default": "None"}, {"name": "variant", "annotation": "Literal['primary', 'secondary', 'stop']", "doc": "'primary' for main call-to-action, 'secondary' for a more subdued style, 'stop' for a stop button.", "default": "\"secondary\""}, {"name": "visible", "annotation": "bool", "doc": "If False, component will be hidden.", "default": "True"}, {"name": "size", "annotation": "Literal['sm', 'lg'] | None", "doc": "Size of the button. Can be \"sm\" or \"lg\".", "default": "None"}, {"name": "scale", "annotation": "int | None", "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", "default": "None"}, {"name": "min_width", "annotation": "int | None", "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", "default": "None"}, {"name": "interactive", "annotation": "bool", "doc": "If False, the UploadButton will be in a disabled state.", "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "elem_classes", "annotation": "list[str] | str | None", "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "type", "annotation": "Literal['file', 'bytes']", "doc": "Type of value to be returned by component. \"file\" returns a temporary file object with the same base name as the uploaded file, whose full path can be retrieved by file_obj.name, \"binary\" returns an bytes object.", "default": "\"file\""}, {"name": "file_count", "annotation": "Literal['single', 'multiple', 'directory']", "doc": "if single, allows user to upload one file. If \"multiple\", user uploads multiple files. If \"directory\", user uploads all files in selected directory. Return type will be list for each file in case of \"multiple\" or \"directory\".", "default": "\"single\""}, {"name": "file_types", "annotation": "list[str] | None", "doc": "List of type of files to be uploaded. \"file\" allows any file to be uploaded, \"image\" allows only image files to be uploaded, \"audio\" allows only audio files to be uploaded, \"video\" allows only video files to be uploaded, \"text\" allows only text files to be uploaded.", "default": "None"}], "returns": {"annotation": null}, "example": null, "fns": [{"fn": null, "name": "click", "description": "This listener is triggered when the component (e.g. a button) is clicked. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. 
Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.UploadButton"}, {"fn": null, "name": "upload", "description": "This listener is triggered when the user uploads a file into the component (e.g. when the user uploads a video into a video component). This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. 
leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.UploadButton"}], "string_shortcuts": [["UploadButton", "uploadbutton", "Uses default values"]], "demos": [["upload_button", "import gradio as gr\n\ndef upload_file(files):\n file_paths = [file.name for file in files]\n return file_paths\n\nwith gr.Blocks() as demo:\n file_output = gr.File()\n upload_button = gr.UploadButton(\"Click to Upload a File\", file_types=[\"image\", \"video\"], file_count=\"multiple\")\n upload_button.upload(upload_file, upload_button, file_output)\n\ndemo.launch()\n"]], "preprocessing": "passes the uploaded file as a file-object or List[file-object] depending on `file_count` (or a bytes/Listbytes depending on `type`)", "postprocessing": "expects function to return a str path to a file, or List[str] consisting of paths to files.", "examples-format": "a str path to a local file that populates the component.", "parent": "gradio", "prev_obj": "Timeseries", "next_obj": "Video"}, "video": {"class": null, "name": "Video", "description": "Creates a video component that can be used to upload/record videos (as an input) or display videos (as an output). For the video to be playable in the browser it must have a compatible container and codec combination. Allowed combinations are .mp4 with h264 codec, .ogg with theora codec, and .webm with vp9 codec. If the component detects that the output video would not be playable in the browser it will attempt to convert it to a playable mp4 video. If the conversion fails, the original video is returned.", "tags": {"preprocessing": "passes the uploaded video as a {str} filepath or URL whose extension can be modified by `format`.", "postprocessing": "expects a {str} or {pathlib.Path} filepath to a video which is displayed, or a {Tuple[str | pathlib.Path, str | pathlib.Path | None]} where the first element is a filepath to a video and the second element is an optional filepath to a subtitle file.", "examples-format": "a {str} filepath to a local file that contains the video, or a {Tuple[str, str]} where the first element is a filepath to a video file and the second element is a filepath to a subtitle file.", "demos": "video_identity, video_subtitle"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "value", "annotation": "str | Path | tuple[str | Path, str | Path | None] | Callable | None", "doc": "A path or URL for the default value that Video component is going to take. Can also be a tuple consisting of (video filepath, subtitle filepath). 
If a subtitle file is provided, it should be of type .srt or .vtt. Or can be callable, in which case the function will be called whenever the app loads to set the initial value of the component.", "default": "None"}, {"name": "format", "annotation": "str | None", "doc": "Format of video format to be returned by component, such as 'avi' or 'mp4'. Use 'mp4' to ensure browser playability. If set to None, video will keep uploaded format.", "default": "None"}, {"name": "source", "annotation": "Literal['upload', 'webcam']", "doc": "Source of video. \"upload\" creates a box where user can drop an video file, \"webcam\" allows user to record a video from their webcam.", "default": "\"upload\""}, {"name": "height", "annotation": "int | None", "doc": "Height of the displayed video in pixels.", "default": "None"}, {"name": "width", "annotation": "int | None", "doc": "Width of the displayed video in pixels.", "default": "None"}, {"name": "label", "annotation": "str | None", "doc": "component name in interface.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.", "default": "None"}, {"name": "show_label", "annotation": "bool | None", "doc": "if True, will display label.", "default": "None"}, {"name": "container", "annotation": "bool", "doc": "If True, will place the component in a container - providing some extra padding around the border.", "default": "True"}, {"name": "scale", "annotation": "int | None", "doc": "relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.", "default": "None"}, {"name": "min_width", "annotation": "int", "doc": "minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.", "default": "160"}, {"name": "interactive", "annotation": "bool | None", "doc": "if True, will allow users to upload a video; if False, can only be used to display videos. If not provided, this is inferred based on whether the component is used as an input or output.", "default": "None"}, {"name": "visible", "annotation": "bool", "doc": "If False, component will be hidden.", "default": "True"}, {"name": "elem_id", "annotation": "str | None", "doc": "An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "elem_classes", "annotation": "list[str] | str | None", "doc": "An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.", "default": "None"}, {"name": "mirror_webcam", "annotation": "bool", "doc": "If True webcam will be mirrored. Default is True.", "default": "True"}, {"name": "include_audio", "annotation": "bool | None", "doc": "Whether the component should record/retain the audio track for a video. By default, audio is excluded for webcam videos and included for uploaded videos.", "default": "None"}, {"name": "autoplay", "annotation": "bool", "doc": "Whether to automatically play the video when the component is used as an output. 
Note: browsers will not autoplay video files if the user has not interacted with the page yet.", "default": "False"}, {"name": "show_share_button", "annotation": "bool | None", "doc": "If True, will show a share icon in the corner of the component that allows user to share outputs to Hugging Face Spaces Discussions. If False, icon does not appear. If set to None (default behavior), then the icon appears if this Gradio app is launched on Spaces, but not otherwise.", "default": "None"}], "returns": {"annotation": null}, "example": null, "fns": [{"fn": null, "name": "change", "description": "This listener is triggered when the component's value changes either because of user input (e.g. a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). See `.input()` for a listener that is only triggered by user input. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Video"}, {"fn": null, "name": "clear", "description": "This listener is triggered when the user clears the component (e.g. image or audio) using the X button for the component. This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Video"}, {"fn": null, "name": "play", "description": "This listener is triggered when the user plays the component (e.g. audio or video). This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. 
If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Video"}, {"fn": null, "name": "pause", "description": "This listener is triggered when the media stops playing for any reason (e.g. audio or video). This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. 
Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. 
Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Video"}, {"fn": null, "name": "stop", "description": "This listener is triggered when the user reaches the end of the media track (e.g. audio or video). This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. 
For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Video"}, {"fn": null, "name": "end", "description": "This listener is triggered when the user reaches the end of the media track (e.g. audio or video). This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Video"}, {"fn": null, "name": "start_recording", "description": "This listener is triggered when the user starts recording with the component (e.g. audio or video). This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Video"}, {"fn": null, "name": "stop_recording", "description": "This listener is triggered when the user stops recording with the component (e.g. audio or video). This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. 
If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Video"}, {"fn": null, "name": "upload", "description": "This listener is triggered when the user uploads a file into the component (e.g. when the user uploads a video into a video component). 
This method can be used when this component is in a Gradio Blocks.", "tags": {}, "parameters": [{"name": "fn", "annotation": "Callable | None", "doc": "the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component."}, {"name": "inputs", "annotation": "Component | Sequence[Component] | set[Component] | None", "doc": "List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.", "default": "None"}, {"name": "outputs", "annotation": "Component | Sequence[Component] | None", "doc": "List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.", "default": "None"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "None"}, {"name": "status_tracker", "annotation": "None", "doc": null, "default": "None"}, {"name": "scroll_to_output", "annotation": "bool", "doc": "If True, will scroll to output component on completion", "default": "False"}, {"name": "show_progress", "annotation": "Literal['full', 'minimal', 'hidden']", "doc": "If True, will show progress animation while pending", "default": "\"full\""}, {"name": "queue", "annotation": "bool | None", "doc": "If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app.", "default": "None"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.", "default": "False"}, {"name": "max_batch_size", "annotation": "int", "doc": "Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)", "default": "4"}, {"name": "preprocess", "annotation": "bool", "doc": "If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "If False, will not run postprocessing of component data before returning 'fn' output to the browser.", "default": "True"}, {"name": "cancels", "annotation": "dict[str, Any] | list[dict[str, Any]] | None", "doc": "A list of other events to cancel when This listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. 
Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish.", "default": "None"}, {"name": "every", "annotation": "float | None", "doc": "Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.", "default": "None"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Video"}], "string_shortcuts": [["Video", "video", "Uses default values"], ["PlayableVideo", "playablevideo", "Uses format=\"mp4\""]], "demos": [["video_identity", "import gradio as gr\nimport os\n\n\ndef video_identity(video):\n return video\n\n\ndemo = gr.Interface(video_identity, \n gr.Video(), \n \"playable_video\", \n examples=[\n os.path.join(os.path.dirname(__file__), \n \"video/video_sample.mp4\")], \n cache_examples=True)\n\nif __name__ == \"__main__\":\n demo.launch()\n"], ["video_subtitle", "import gradio as gr\nimport os\n\na = os.path.join(os.path.dirname(__file__), \"files/a.mp4\") # Video\nb = os.path.join(os.path.dirname(__file__), \"files/b.mp4\") # Video\ns1 = os.path.join(os.path.dirname(__file__), \"files/s1.srt\") # Subtitle\ns2 = os.path.join(os.path.dirname(__file__), \"files/s2.vtt\") # Subtitle\n\n\ndef video_demo(video, subtitle=None):\n if subtitle is None:\n return video\n\n return [video, subtitle.name]\n\n\ndemo = gr.Interface(\n fn=video_demo,\n inputs=[\n gr.Video(type=\"file\", label=\"In\", interactive=True),\n gr.File(label=\"Subtitle\", file_types=[\".srt\", \".vtt\"]),\n ],\n outputs=gr.Video(label=\"Out\"),\n examples=[\n [a, s1],\n [b, s2],\n [a, None],\n ],\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n"]], "preprocessing": "passes the uploaded video as a str filepath or URL whose extension can be modified by `format`.", "postprocessing": "expects a str or pathlib.Path filepath to a video which is displayed, or a Tuple[str | pathlib.Path, str | pathlib.Path | None] where the first element is a filepath to a video and the second element is an optional filepath to a subtitle file.", "examples-format": "a str filepath to a local file that contains the video, or a Tuple[str, str] where the first element is a filepath to a video file and the second element is a filepath to a subtitle file.", "parent": "gradio", "prev_obj": "UploadButton", "next_obj": "Examples"}}, "helpers": {"error": {"class": null, "name": "Error", "description": "This class allows you to pass custom error messages to the user. You can do so by raising a gr.Error(\"custom message\") anywhere in the code, and when that line is executed the custom message will appear in a modal on the demo.
", "tags": {"demos": "calculator"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "message", "annotation": "", "doc": "The error message to be displayed to the user.", "default": "\"Error raised.\""}], "returns": {"annotation": null}, "example": null, "fns": [], "demos": [["calculator", "import gradio as gr\n\ndef calculator(num1, operation, num2):\n if operation == \"add\":\n return num1 + num2\n elif operation == \"subtract\":\n return num1 - num2\n elif operation == \"multiply\":\n return num1 * num2\n elif operation == \"divide\":\n if num2 == 0:\n raise gr.Error(\"Cannot divide by zero!\")\n return num1 / num2\n\ndemo = gr.Interface(\n calculator,\n [\n \"number\", \n gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n \"number\"\n ],\n \"number\",\n examples=[\n [5, \"add\", 3],\n [4, \"divide\", 2],\n [-4, \"multiply\", 2.5],\n [0, \"subtract\", 1.2],\n ],\n title=\"Toy Calculator\",\n description=\"Here's a sample toy calculator. Allows you to calculate things like $2+2=4$\",\n)\nif __name__ == \"__main__\":\n demo.launch()\n"]], "parent": "gradio", "prev_obj": "Video", "next_obj": "load"}, "load": {"class": null, "name": "load", "description": "Method that constructs a Blocks from a Hugging Face repo. Can accept model repos (if src is \"models\") or Space repos (if src is \"spaces\"). The input and output components are automatically loaded from the repo.", "tags": {"parameters": "name: the name of the model (e.g. \"gpt2\" or \"facebook/bart-base\") or space (e.g. \"flax-community/spanish-gpt2\"), can include the `src` as prefix (e.g. \"models/facebook/bart-base\")
src: the source of the model: `models` or `spaces` (or leave empty if source is provided as a prefix in `name`)
api_key: Deprecated. Please use the `hf_token` parameter instead.
hf_token: optional access token for loading private Hugging Face Hub models or spaces. Find your token here: https://huggingface.co/settings/tokens. Warning: only provide this if you are loading a trusted private Space as it can be read by the Space you are loading.
alias: optional string used as the name of the loaded model instead of the default name (only applies if loading a Space running Gradio 2.x)", "returns": "a Gradio Blocks object for the given model"}, "parameters": [{"name": "name", "annotation": "str", "doc": "the name of the model (e.g. \"gpt2\" or \"facebook/bart-base\") or space (e.g. \"flax-community/spanish-gpt2\"), can include the `src` as prefix (e.g. \"models/facebook/bart-base\")"}, {"name": "src", "annotation": "str | None", "doc": "the source of the model: `models` or `spaces` (or leave empty if source is provided as a prefix in `name`)", "default": "None"}, {"name": "api_key", "annotation": "str | None", "doc": "Deprecated. Please use the `hf_token` parameter instead.", "default": "None"}, {"name": "hf_token", "annotation": "str | None", "doc": "optional access token for loading private Hugging Face Hub models or spaces. Find your token here: https://huggingface.co/settings/tokens. Warning: only provide this if you are loading a trusted private Space as it can be read by the Space you are loading.", "default": "None"}, {"name": "alias", "annotation": "str | None", "doc": "optional string used as the name of the loaded model instead of the default name (only applies if loading a Space running Gradio 2.x)", "default": "None"}], "returns": {"annotation": null, "doc": "a Gradio Blocks object for the given model"}, "example": "import gradio as gr\ndemo = gr.load(\"gradio/question-answering\", src=\"spaces\")\ndemo.launch()", "fns": [], "parent": "gradio", "prev_obj": "Error", "next_obj": "Examples"}, "examples": {"class": null, "name": "Examples", "description": "This class is a wrapper over the Dataset component and can be used to create Examples for Blocks / Interfaces. Populates the Dataset component with examples and assigns event listener so that clicking on an example populates the input/output components. Optionally handles example caching for fast inference.
", "tags": {"demos": "blocks_inputs, fake_gan", "guides": "more-on-examples-and-flagging, using-hugging-face-integrations, image-classification-in-pytorch, image-classification-in-tensorflow, image-classification-with-vision-transformers, create-your-own-friends-with-a-gan"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "examples", "annotation": "list[Any] | list[list[Any]] | str", "doc": "example inputs that can be clicked to populate specific components. Should be nested list, in which the outer list consists of samples and each inner list consists of an input corresponding to each input component. A string path to a directory of examples can also be provided but it should be within the directory with the python file running the gradio app. If there are multiple input components and a directory is provided, a log.csv file must be present in the directory to link corresponding inputs."}, {"name": "inputs", "annotation": "IOComponent | list[IOComponent]", "doc": "the component or list of components corresponding to the examples"}, {"name": "outputs", "annotation": "IOComponent | list[IOComponent] | None", "doc": "optionally, provide the component or list of components corresponding to the output of the examples. Required if `cache` is True.", "default": "None"}, {"name": "fn", "annotation": "Callable | None", "doc": "optionally, provide the function to run to generate the outputs corresponding to the examples. Required if `cache` is True.", "default": "None"}, {"name": "cache_examples", "annotation": "bool", "doc": "if True, caches examples for fast runtime. If True, then `fn` and `outputs` must be provided. If `fn` is a generator function, then the last yielded value will be used as the output.", "default": "False"}, {"name": "examples_per_page", "annotation": "int", "doc": "how many examples to show per page.", "default": "10"}, {"name": "label", "annotation": "str | None", "doc": "the label to use for the examples component (by default, \"Examples\")", "default": "\"Examples\""}, {"name": "elem_id", "annotation": "str | None", "doc": "an optional string that is assigned as the id of this component in the HTML DOM.", "default": "None"}, {"name": "run_on_click", "annotation": "bool", "doc": "if cache_examples is False, clicking on an example does not run the function when an example is clicked. Set this to True to run the function when an example is clicked. Has no effect if cache_examples is True.", "default": "False"}, {"name": "preprocess", "annotation": "bool", "doc": "if True, preprocesses the example input before running the prediction function and caching the output. Only applies if cache_examples is True.", "default": "True"}, {"name": "postprocess", "annotation": "bool", "doc": "if True, postprocesses the example output after running the prediction function and before caching. Only applies if cache_examples is True.", "default": "True"}, {"name": "api_name", "annotation": "str | None | Literal[False]", "doc": "Defines how the event associated with clicking on the examples appears in the API docs. Can be a string, None, or False. If False (default), the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. 
If set to a string, the endpoint will be exposed in the api docs with the given name.", "default": "False"}, {"name": "batch", "annotation": "bool", "doc": "If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. Used only if cache_examples is True.", "default": "False"}], "returns": {"annotation": null}, "example": null, "fns": [], "demos": [["blocks_inputs", "import gradio as gr\nimport os\n\n\ndef combine(a, b):\n return a + \" \" + b\n\n\ndef mirror(x):\n return x\n\n\nwith gr.Blocks() as demo:\n\n txt = gr.Textbox(label=\"Input\", lines=2)\n txt_2 = gr.Textbox(label=\"Input 2\")\n txt_3 = gr.Textbox(value=\"\", label=\"Output\")\n btn = gr.Button(value=\"Submit\")\n btn.click(combine, inputs=[txt, txt_2], outputs=[txt_3])\n\n with gr.Row():\n im = gr.Image()\n im_2 = gr.Image()\n\n btn = gr.Button(value=\"Mirror Image\")\n btn.click(mirror, inputs=[im], outputs=[im_2])\n\n gr.Markdown(\"## Text Examples\")\n gr.Examples(\n [[\"hi\", \"Adam\"], [\"hello\", \"Eve\"]],\n [txt, txt_2],\n txt_3,\n combine,\n cache_examples=True,\n )\n gr.Markdown(\"## Image Examples\")\n gr.Examples(\n examples=[os.path.join(os.path.dirname(__file__), \"lion.jpg\")],\n inputs=im,\n outputs=im_2,\n fn=mirror,\n cache_examples=True,\n )\n\nif __name__ == \"__main__\":\n demo.launch()\n"], ["fake_gan", "# This demo needs to be run from the repo folder.\n# python demo/fake_gan/run.py\nimport random\n\nimport gradio as gr\n\n\ndef fake_gan():\n images = [\n (random.choice(\n [\n \"https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n \"https://images.unsplash.com/photo-1554151228-14d9def656e4?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=386&q=80\",\n \"https://images.unsplash.com/photo-1542909168-82c3e7fdca5c?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxzZWFyY2h8MXx8aHVtYW4lMjBmYWNlfGVufDB8fDB8fA%3D%3D&w=1000&q=80\",\n \"https://images.unsplash.com/photo-1546456073-92b9f0a8d413?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n \"https://images.unsplash.com/photo-1601412436009-d964bd02edbc?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=464&q=80\",\n ]\n ), f\"label {i}\" if i != 0 else \"label\" * 50)\n for i in range(3)\n ]\n return images\n\n\nwith gr.Blocks() as demo:\n with gr.Column(variant=\"panel\"):\n with gr.Row(variant=\"compact\"):\n text = gr.Textbox(\n label=\"Enter your prompt\",\n show_label=False,\n max_lines=1,\n placeholder=\"Enter your prompt\",\n ).style(\n container=False,\n )\n btn = gr.Button(\"Generate image\").style(full_width=False)\n\n gallery = gr.Gallery(\n label=\"Generated images\", show_label=False, elem_id=\"gallery\"\n ).style(columns=[2], rows=[2], object_fit=\"contain\", height=\"auto\")\n\n btn.click(fake_gan, None, gallery)\n\nif __name__ == \"__main__\":\n demo.launch()\n"]], "guides": [{"name": "using-hugging-face-integrations", "category": "integrating-other-frameworks", "pretty_category": "Integrating Other Frameworks", "guide_index": 1, "absolute_index": 16, "pretty_name": "Using Hugging Face Integrations", "content": "# Using Hugging Face Integrations\n\n\n\n\n\n\n## Introduction\n\nThe Hugging Face Hub is a central platform that has over 190,000 [models](https://huggingface.co/models), 32,000 [datasets](https://huggingface.co/datasets) and 40,000 
[demos](https://huggingface.co/spaces), also known as Spaces. Although Hugging Face is famous for its \ud83e\udd17 transformers and diffusers libraries, the Hub also supports dozens of ML libraries, such as PyTorch, TensorFlow, spaCy, and many others across a variety of domains, from computer vision to reinforcement learning.\n\nGradio has multiple features that make it extremely easy to leverage existing models and Spaces on the Hub. This guide walks through these features.\n\n## Using regular inference with `pipeline`\n\nFirst, let's build a simple interface that translates text from English to Spanish. Between the over a thousand models shared by the University of Helsinki, there is an [existing model](https://huggingface.co/Helsinki-NLP/opus-mt-en-es), `opus-mt-en-es`, that does precisely this!\n\nThe \ud83e\udd17 transformers library has a very easy-to-use abstraction, [`pipeline()`](https://huggingface.co/docs/transformers/v4.16.2/en/main_classes/pipelines#transformers.pipeline) that handles most of the complex code to offer a simple API for common tasks. By specifying the task and an (optional) model, you can use an existing model with few lines:\n\n```python\nimport gradio as gr\n\nfrom transformers import pipeline\n\npipe = pipeline(\"translation\", model=\"Helsinki-NLP/opus-mt-en-es\")\n\ndef predict(text):\n return pipe(text)[0][\"translation_text\"]\n \ndemo = gr.Interface(\n fn=predict, \n inputs='text',\n outputs='text',\n)\n\ndemo.launch()\n```\n\nBut `gradio` actually makes it even easier to convert a `pipeline` to a demo, simply by using the `gradio.Interface.from_pipeline` methods, which skips the need to specify the input and output components:\n\n```python\nfrom transformers import pipeline\nimport gradio as gr\n\npipe = pipeline(\"translation\", model=\"Helsinki-NLP/opus-mt-en-es\")\n\ndemo = gr.Interface.from_pipeline(pipe)\ndemo.launch()\n```\n\nThe previous code produces the following interface, which you can try right here in your browser: \n\n\n\n\n\n## Using Hugging Face Inference API\n\nHugging Face has a free service called the [Inference API](https://huggingface.co/inference-api), which allows you to send HTTP requests to models in the Hub. For transformers or diffusers-based models, the API can be 2 to 10 times faster than running the inference yourself. The API is free (rate limited), and you can switch to dedicated [Inference Endpoints](https://huggingface.co/pricing) when you want to use it in production.\n\nLet's try the same demo as above but using the Inference API instead of loading the model yourself. Given a Hugging Face model supported in the Inference API, Gradio can automatically infer the expected input and output and make the underlying server calls, so you don't have to worry about defining the prediction function. Here is what the code would look like!\n\n```python\nimport gradio as gr\n\ndemo = gr.load(\"Helsinki-NLP/opus-mt-en-es\", src=\"models\")\n\ndemo.launch()\n```\n\nNotice that we just put specify the model name and state that the `src` should be `models` (Hugging Face's Model Hub). There is no need to install any dependencies (except `gradio`) since you are not loading the model on your computer.\n\nYou might notice that the first inference takes about 20 seconds. This happens since the Inference API is loading the model in the server. 
You get some benefits afterward:\n\n* The inference will be much faster.\n* The server caches your requests.\n* You get built-in automatic scaling.\n\n## Hosting your Gradio demos\n\n[Hugging Face Spaces](https://hf.co/spaces) allows anyone to host their Gradio demos freely, and uploading your Gradio demos take a couple of minutes. You can head to [hf.co/new-space](https://huggingface.co/new-space), select the Gradio SDK, create an `app.py` file, and voila! You have a demo you can share with anyone else. To learn more, read [this guide how to host on Hugging Face Spaces using the website](https://huggingface.co/blog/gradio-spaces).\n\n\nAlternatively, you can create a Space programmatically, making use of the [huggingface_hub client library](https://huggingface.co/docs/huggingface_hub/index) library. Here's an example:\n\n```python\nfrom huggingface_hub import (\n create_repo,\n get_full_repo_name,\n upload_file,\n)\ncreate_repo(name=target_space_name, token=hf_token, repo_type=\"space\", space_sdk=\"gradio\")\nrepo_name = get_full_repo_name(model_id=target_space_name, token=hf_token)\nfile_url = upload_file(\n path_or_fileobj=\"file.txt\",\n path_in_repo=\"app.py\",\n repo_id=repo_name,\n repo_type=\"space\",\n token=hf_token,\n)\n```\nHere, `create_repo` creates a gradio repo with the target name under a specific account using that account's Write Token. `repo_name` gets the full repo name of the related repo. Finally `upload_file` uploads a file inside the repo with the name `app.py`.\n\n\n\n## Embedding your Space demo on other websites\n\nThroughout this guide, you've seen many embedded Gradio demos. You can also do this on own website! The first step is to create a Hugging Face Space with the demo you want to showcase. Then, [follow the steps here to embed the Space on your website](/guides/sharing-your-app/#embedding-hosted-spaces).\n\n\n## Loading demos from Spaces\n\nYou can also use and remix existing Gradio demos on Hugging Face Spaces. For example, you could take two existing Gradio demos and put them as separate tabs and create a new demo. You can run this new demo locally, or upload it to Spaces, allowing endless possibilities to remix and create new demos!\n\nHere's an example that does exactly that:\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n with gr.Tab(\"Translate to Spanish\"):\n gr.load(\"gradio/helsinki_translation_en_es\", src=\"spaces\")\n with gr.Tab(\"Translate to French\"):\n gr.load(\"abidlabs/en2fr\", src=\"spaces\")\n\ndemo.launch()\n```\n\nNotice that we use `gr.load()`, the same method we used to load models using the Inference API. However, here we specify that the `src` is `spaces` (Hugging Face Spaces).\n\n## Recap\n\nThat's it! Let's recap the various ways Gradio and Hugging Face work together:\n\n1. You can convert a `transformers` pipeline into a Gradio demo using `from_pipeline()`\n2. You can build a demo around the Inference API without having to load the model easily using `gr.load()`\n3. You host your Gradio demo on Hugging Face Spaces, either using the GUI or entirely in Python.\n4. You can embed Gradio demos that are hosted on Hugging Face Spaces onto your own website.\n5. You can load demos from Hugging Face Spaces to remix and create new Gradio demos using `gr.load()`.\n\n\n\ud83e\udd17\n", "html": "

Using Hugging Face Integrations

\n\n

Introduction

\n\n

The Hugging Face Hub is a central platform that has over 190,000 models, 32,000 datasets and 40,000 demos, also known as Spaces. Although Hugging Face is famous for its \ud83e\udd17 transformers and diffusers libraries, the Hub also supports dozens of ML libraries, such as PyTorch, TensorFlow, spaCy, and many others across a variety of domains, from computer vision to reinforcement learning.

\n\n

Gradio has multiple features that make it extremely easy to leverage existing models and Spaces on the Hub. This guide walks through these features.

\n\n

Using regular inference with pipeline

\n\n

First, let's build a simple interface that translates text from English to Spanish. Among the more than a thousand models shared by the University of Helsinki, there is an existing model, opus-mt-en-es, that does precisely this!

\n\n

The \ud83e\udd17 transformers library has a very easy-to-use abstraction, pipeline(), that handles most of the complex code to offer a simple API for common tasks. By specifying the task and an (optional) model, you can use an existing model with a few lines of code:

\n\n
import gradio as gr\n\nfrom transformers import pipeline\n\npipe = pipeline(\"translation\", model=\"Helsinki-NLP/opus-mt-en-es\")\n\ndef predict(text):\n  return pipe(text)[0][\"translation_text\"]\n\ndemo = gr.Interface(\n  fn=predict, \n  inputs='text',\n  outputs='text',\n)\n\ndemo.launch()\n
\n\n

But gradio actually makes it even easier to convert a pipeline to a demo, simply by using the gradio.Interface.from_pipeline method, which skips the need to specify the input and output components:

\n\n
from transformers import pipeline\nimport gradio as gr\n\npipe = pipeline(\"translation\", model=\"Helsinki-NLP/opus-mt-en-es\")\n\ndemo = gr.Interface.from_pipeline(pipe)\ndemo.launch()\n
\n\n

The previous code produces the following interface, which you can try right here in your browser:

\n\n
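If you want to customize the auto-generated demo, `gr.Interface.from_pipeline` also forwards extra keyword arguments to the underlying `Interface`. A minimal sketch (the forwarding behavior, title, and example sentence below are assumptions for illustration):

```python
import gradio as gr
from transformers import pipeline

pipe = pipeline("translation", model="Helsinki-NLP/opus-mt-en-es")

# Extra keyword arguments are passed through to gr.Interface,
# so the generated demo can still be customized.
demo = gr.Interface.from_pipeline(
    pipe,
    title="English to Spanish",        # hypothetical title
    examples=["Hello, how are you?"],  # hypothetical example input
)
demo.launch()
```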

\n\n

Using Hugging Face Inference API

\n\n

Hugging Face has a free service called the Inference API, which allows you to send HTTP requests to models in the Hub. For transformers or diffusers-based models, the API can be 2 to 10 times faster than running the inference yourself. The API is free (rate limited), and you can switch to dedicated Inference Endpoints when you want to use it in production.

\n\n

Let's try the same demo as above but using the Inference API instead of loading the model yourself. Given a Hugging Face model supported in the Inference API, Gradio can automatically infer the expected input and output and make the underlying server calls, so you don't have to worry about defining the prediction function. Here is what the code would look like!

\n\n
import gradio as gr\n\ndemo = gr.load(\"Helsinki-NLP/opus-mt-en-es\", src=\"models\")\n\ndemo.launch()\n
\n\n

Notice that we just specify the model name and state that the src should be models (Hugging Face's Model Hub). There is no need to install any dependencies (except gradio) since you are not loading the model on your computer.

\n\n
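The same one-liner also works for private models: `gr.load()` accepts an `hf_token`, and the source can be folded into the name as a `models/` prefix instead of passing `src`. A short sketch (the token value is a placeholder):

```python
import gradio as gr

# The "models/" prefix replaces src="models"; hf_token (a placeholder here)
# is only needed when loading private Hugging Face Hub models.
demo = gr.load("models/Helsinki-NLP/opus-mt-en-es", hf_token="hf_...")
demo.launch()
```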

You might notice that the first inference takes about 20 seconds. This happens since the Inference API is loading the model in the server. You get some benefits afterward:

\n\n
    \n
  • The inference will be much faster.
  • The server caches your requests.
  • You get built-in automatic scaling.
\n\n

Hosting your Gradio demos

\n\n

Hugging Face Spaces allows anyone to host their Gradio demos for free, and uploading a Gradio demo takes only a couple of minutes. You can head to hf.co/new-space, select the Gradio SDK, create an app.py file, and voila! You have a demo you can share with anyone else. To learn more, read this guide on how to host on Hugging Face Spaces using the website.

\n\n

Alternatively, you can create a Space programmatically, making use of the huggingface_hub client library. Here's an example:

\n\n
from huggingface_hub import (\n    create_repo,\n    get_full_repo_name,\n    upload_file,\n)\ncreate_repo(name=target_space_name, token=hf_token, repo_type=\"space\", space_sdk=\"gradio\")\nrepo_name = get_full_repo_name(model_id=target_space_name, token=hf_token)\nfile_url = upload_file(\n    path_or_fileobj=\"file.txt\",\n    path_in_repo=\"app.py\",\n    repo_id=repo_name,\n    repo_type=\"space\",\n    token=hf_token,\n)\n
\n\n

Here, create_repo creates a Gradio Space repo with the target name under a specific account using that account's write token. repo_name gets the full repo name of the related repo. Finally, upload_file uploads a file into the repo under the name app.py.

\n\n

Embedding your Space demo on other websites

\n\n

Throughout this guide, you've seen many embedded Gradio demos. You can also do this on your own website! The first step is to create a Hugging Face Space with the demo you want to showcase. Then, follow the steps here to embed the Space on your website.

\n\n

Loading demos from Spaces

\n\n

You can also use and remix existing Gradio demos on Hugging Face Spaces. For example, you could take two existing Gradio demos and put them in separate tabs to create a new demo. You can run this new demo locally, or upload it to Spaces, opening up endless possibilities to remix and create new demos!

\n\n

Here's an example that does exactly that:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n  with gr.Tab(\"Translate to Spanish\"):\n    gr.load(\"gradio/helsinki_translation_en_es\", src=\"spaces\")\n  with gr.Tab(\"Translate to French\"):\n    gr.load(\"abidlabs/en2fr\", src=\"spaces\")\n\ndemo.launch()\n
\n\n

Notice that we use gr.load(), the same method we used to load models using the Inference API. However, here we specify that the src is spaces (Hugging Face Spaces).

\n\n
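Because `gr.load()` builds a Blocks demo, calling it inside a `gr.Blocks()` context renders the loaded Space in place, so you can freely mix it with your own components. A small sketch reusing the Space from the example above:

```python
import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("## A remixed translation demo")               # your own component
    gr.load("gradio/helsinki_translation_en_es", src="spaces")  # the loaded Space

demo.launch()
```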

Recap

\n\n

That's it! Let's recap the various ways Gradio and Hugging Face work together:

\n\n
    \n
  1. You can convert a transformers pipeline into a Gradio demo using from_pipeline()
  2. You can easily build a demo around the Inference API, without loading the model yourself, using gr.load()
  3. You can host your Gradio demo on Hugging Face Spaces, either using the GUI or entirely in Python.
  4. You can embed Gradio demos that are hosted on Hugging Face Spaces onto your own website.
  5. You can load demos from Hugging Face Spaces to remix and create new Gradio demos using gr.load().
\n\n

\ud83e\udd17

\n", "tags": ["HUB", "SPACES", "EMBED"], "spaces": ["https://huggingface.co/spaces/gradio/helsinki_translation_en_es"], "url": "/guides/using-hugging-face-integrations/", "contributor": "Omar Sanseviero \ud83e\udd99 "}, {"name": "image-classification-in-pytorch", "category": "integrating-other-frameworks", "pretty_category": "Integrating Other Frameworks", "guide_index": null, "absolute_index": 20, "pretty_name": "Image Classification In Pytorch", "content": "# Image Classification in PyTorch\n\n\n\n\n## Introduction\n\nImage classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from autonomous vehicles to medical imaging. \n\nSuch models are perfect to use with Gradio's *image* input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in Python, and it will look like this (try one of the examples!):\n\n\n\n\nLet's get started!\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). We will be using a pretrained image classification model, so you should also have `torch` installed.\n\n## Step 1 \u2014 Setting up the Image Classification Model\n\nFirst, we will need an image classification model. For this tutorial, we will use a pretrained Resnet-18 model, as it is easily downloadable from [PyTorch Hub](https://pytorch.org/hub/pytorch_vision_resnet/). You can use a different pretrained model or train your own. \n\n```python\nimport torch\n\nmodel = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()\n```\n\nBecause we will be using the model for inference, we have called the `.eval()` method.\n\n## Step 2 \u2014 Defining a `predict` function\n\nNext, we will need to define a function that takes in the *user input*, which in this case is an image, and returns the prediction. The prediction should be returned as a dictionary whose keys are class name and values are confidence probabilities. We will load the class names from this [text file](https://git.io/JJkYN).\n\nIn the case of our pretrained model, it will look like this:\n\n```python\nimport requests\nfrom PIL import Image\nfrom torchvision import transforms\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\ndef predict(inp):\n inp = transforms.ToTensor()(inp).unsqueeze(0)\n with torch.no_grad():\n prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)\n confidences = {labels[i]: float(prediction[i]) for i in range(1000)} \n return confidences\n```\n\nLet's break this down. The function takes one parameter:\n\n* `inp`: the input image as a `PIL` image\n\nThen, the function converts the image to a PIL Image and then eventually a PyTorch `tensor`, passes it through the model, and returns:\n\n* `confidences`: the predictions, as a dictionary whose keys are class labels and whose values are confidence probabilities\n\n## Step 3 \u2014 Creating a Gradio Interface\n\nNow that we have our predictive function set up, we can create a Gradio Interface around it. \n\nIn this case, the input component is a drag-and-drop image component. To create this input, we use `Image(type=\"pil\")` which creates the component and handles the preprocessing to convert that to a `PIL` image. 
\n\nThe output component will be a `Label`, which displays the top labels in a nice form. Since we don't want to show all 1,000 class labels, we will customize it to show only the top 3 images by constructing it as `Label(num_top_classes=3)`.\n\nFinally, we'll add one more parameter, the `examples`, which allows us to prepopulate our interfaces with a few predefined examples. The code for Gradio looks like this:\n\n```python\nimport gradio as gr\n\ngr.Interface(fn=predict, \n inputs=gr.Image(type=\"pil\"),\n outputs=gr.Label(num_top_classes=3),\n examples=[\"lion.jpg\", \"cheetah.jpg\"]).launch()\n```\n\nThis produces the following interface, which you can try right here in your browser (try uploading your own examples!):\n\n\n\n----------\n\nAnd you're done! That's all the code you need to build a web demo for an image classifier. If you'd like to share with others, try setting `share=True` when you `launch()` the Interface!\n\n", "html": "

Image Classification in PyTorch

\n\n

Introduction

\n\n

Image classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from autonomous vehicles to medical imaging.

\n\n

Such models are perfect to use with Gradio's image input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in Python, and it will look like this (try one of the examples!):

\n\n\n\n

Let's get started!

\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed. We will be using a pretrained image classification model, so you should also have torch installed.

\n\n

Step 1 \u2014 Setting up the Image Classification Model

\n\n

First, we will need an image classification model. For this tutorial, we will use a pretrained Resnet-18 model, as it is easily downloadable from PyTorch Hub. You can use a different pretrained model or train your own.

\n\n
import torch\n\nmodel = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()\n
\n\n

Because we will be using the model for inference, we have called the .eval() method.

\n\n
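Before building a demo around it, it can help to sanity-check the model on a dummy input. A short sketch (assumes the `model` loaded above): a random 224x224 RGB tensor should produce 1,000 ImageNet logits.

```python
import torch

# Assumes `model` is the pretrained resnet18 loaded above.
dummy = torch.randn(1, 3, 224, 224)   # one fake 224x224 RGB image
with torch.no_grad():
    logits = model(dummy)
print(logits.shape)  # torch.Size([1, 1000]) -- one score per ImageNet class
```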

Step 2 \u2014 Defining a predict function

\n\n

Next, we will need to define a function that takes in the user input, which in this case is an image, and returns the prediction. The prediction should be returned as a dictionary whose keys are class names and whose values are confidence probabilities. We will load the class names from this text file.

\n\n

In the case of our pretrained model, it will look like this:

\n\n
import requests\nfrom PIL import Image\nfrom torchvision import transforms\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\ndef predict(inp):\n  inp = transforms.ToTensor()(inp).unsqueeze(0)\n  with torch.no_grad():\n    prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)\n    confidences = {labels[i]: float(prediction[i]) for i in range(1000)}    \n  return confidences\n
\n\n

Let's break this down. The function takes one parameter:

\n\n
    \n
  • inp: the input image as a PIL image
\n\n

Then, the function converts the PIL image to a PyTorch tensor, passes it through the model, and returns:

\n\n
    \n
  • confidences: the predictions, as a dictionary whose keys are class labels and whose values are confidence probabilities
\n\n
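Before wrapping this in an interface, you can try `predict` directly on a local file. A quick sketch (assumes the `predict` function above; "lion.jpg" is a placeholder path, matching the example images used later):

```python
from PIL import Image

# Assumes `predict` from the snippet above; "lion.jpg" is a placeholder path.
img = Image.open("lion.jpg")
confidences = predict(img)
top3 = sorted(confidences.items(), key=lambda kv: kv[1], reverse=True)[:3]
print(top3)  # the three (label, probability) pairs with the highest confidence
```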

Step 3 \u2014 Creating a Gradio Interface

\n\n

Now that we have our predictive function set up, we can create a Gradio Interface around it.

\n\n

In this case, the input component is a drag-and-drop image component. To create this input, we use Image(type=\"pil\") which creates the component and handles the preprocessing to convert that to a PIL image.

\n\n

The output component will be a Label, which displays the top labels in a nice form. Since we don't want to show all 1,000 class labels, we will customize it to show only the top 3 classes by constructing it as Label(num_top_classes=3).

\n\n

Finally, we'll add one more parameter, the examples, which allows us to prepopulate our interfaces with a few predefined examples. The code for Gradio looks like this:

\n\n
import gradio as gr\n\ngr.Interface(fn=predict, \n             inputs=gr.Image(type=\"pil\"),\n             outputs=gr.Label(num_top_classes=3),\n             examples=[\"lion.jpg\", \"cheetah.jpg\"]).launch()\n
\n\n

This produces the following interface, which you can try right here in your browser (try uploading your own examples!):

\n\n\n\n
\n\n

And you're done! That's all the code you need to build a web demo for an image classifier. If you'd like to share with others, try setting share=True when you launch() the Interface!
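As a small sketch, sharing only requires changing the launch call (everything else is the same Interface shown above):

```python
import gradio as gr

demo = gr.Interface(fn=predict,
                    inputs=gr.Image(type="pil"),
                    outputs=gr.Label(num_top_classes=3),
                    examples=["lion.jpg", "cheetah.jpg"])
demo.launch(share=True)  # generates a temporary public link you can send to others
```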

\n", "tags": ["VISION", "RESNET", "PYTORCH"], "spaces": ["https://huggingface.co/spaces/abidlabs/pytorch-image-classifier", "https://huggingface.co/spaces/pytorch/ResNet", "https://huggingface.co/spaces/pytorch/ResNext", "https://huggingface.co/spaces/pytorch/SqueezeNet"], "url": "/guides/image-classification-in-pytorch/", "contributor": null}, {"name": "image-classification-in-tensorflow", "category": "integrating-other-frameworks", "pretty_category": "Integrating Other Frameworks", "guide_index": null, "absolute_index": 21, "pretty_name": "Image Classification In Tensorflow", "content": "# Image Classification in TensorFlow and Keras\n\n\n\n\n## Introduction\n\nImage classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from traffic control systems to satellite imaging. \n\nSuch models are perfect to use with Gradio's *image* input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in Python, and it will look like this (try one of the examples!):\n\n\n\n\nLet's get started!\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). We will be using a pretrained Keras image classification model, so you should also have `tensorflow` installed.\n\n## Step 1 \u2014 Setting up the Image Classification Model\n\nFirst, we will need an image classification model. For this tutorial, we will use a pretrained Mobile Net model, as it is easily downloadable from [Keras](https://keras.io/api/applications/mobilenet/). You can use a different pretrained model or train your own. \n\n```python\nimport tensorflow as tf\n\ninception_net = tf.keras.applications.MobileNetV2()\n```\n\nThis line automatically downloads the MobileNet model and weights using the Keras library. \n\n## Step 2 \u2014 Defining a `predict` function\n\nNext, we will need to define a function that takes in the *user input*, which in this case is an image, and returns the prediction. The prediction should be returned as a dictionary whose keys are class name and values are confidence probabilities. We will load the class names from this [text file](https://git.io/JJkYN).\n\nIn the case of our pretrained model, it will look like this:\n\n```python\nimport requests\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\ndef classify_image(inp):\n inp = inp.reshape((-1, 224, 224, 3))\n inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)\n prediction = inception_net.predict(inp).flatten()\n confidences = {labels[i]: float(prediction[i]) for i in range(1000)}\n return confidences\n```\n\nLet's break this down. The function takes one parameter:\n\n* `inp`: the input image as a `numpy` array\n\nThen, the function adds a batch dimension, passes it through the model, and returns:\n\n* `confidences`: the predictions, as a dictionary whose keys are class labels and whose values are confidence probabilities\n\n## Step 3 \u2014 Creating a Gradio Interface\n\nNow that we have our predictive function set up, we can create a Gradio Interface around it. \n\nIn this case, the input component is a drag-and-drop image component. 
To create this input, we can use the `\"gradio.inputs.Image\"` class, which creates the component and handles the preprocessing to convert that to a numpy array. We will instantiate the class with a parameter that automatically preprocesses the input image to be 224 pixels by 224 pixels, which is the size that MobileNet expects.\n\nThe output component will be a `\"label\"`, which displays the top labels in a nice form. Since we don't want to show all 1,000 class labels, we will customize it to show only the top 3 images.\n\nFinally, we'll add one more parameter, the `examples`, which allows us to prepopulate our interfaces with a few predefined examples. The code for Gradio looks like this:\n\n```python\nimport gradio as gr\n\ngr.Interface(fn=classify_image, \n inputs=gr.Image(shape=(224, 224)),\n outputs=gr.Label(num_top_classes=3),\n examples=[\"banana.jpg\", \"car.jpg\"]).launch()\n```\n\nThis produces the following interface, which you can try right here in your browser (try uploading your own examples!):\n\n\n\n----------\n\nAnd you're done! That's all the code you need to build a web demo for an image classifier. If you'd like to share with others, try setting `share=True` when you `launch()` the Interface!\n\n", "html": "

Image Classification in TensorFlow and Keras

\n\n

Introduction

\n\n

Image classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from traffic control systems to satellite imaging.

\n\n

Such models are perfect to use with Gradio's image input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in Python, and it will look like this (try one of the examples!):

\n\n\n\n

Let's get started!

\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed. We will be using a pretrained Keras image classification model, so you should also have tensorflow installed.

\n\n

Step 1 \u2014 Setting up the Image Classification Model

\n\n

First, we will need an image classification model. For this tutorial, we will use a pretrained MobileNet model, as it is easily downloadable from Keras. You can use a different pretrained model or train your own.

\n\n
import tensorflow as tf\n\ninception_net = tf.keras.applications.MobileNetV2()\n
\n\n

This line automatically downloads the MobileNet model and weights using the Keras library.

\n\n

Step 2 \u2014 Defining a predict function

\n\n

Next, we will need to define a function that takes in the user input, which in this case is an image, and returns the prediction. The prediction should be returned as a dictionary whose keys are class names and whose values are confidence probabilities. We will load the class names from this text file.

\n\n

In the case of our pretrained model, it will look like this:

\n\n
import requests\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\ndef classify_image(inp):\n  inp = inp.reshape((-1, 224, 224, 3))\n  inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)\n  prediction = inception_net.predict(inp).flatten()\n  confidences = {labels[i]: float(prediction[i]) for i in range(1000)}\n  return confidences\n
\n\n

Let's break this down. The function takes one parameter:

\n\n
    \n
  • inp: the input image as a numpy array
  • \n
\n\n

Then, the function adds a batch dimension, passes it through the model, and returns:

\n\n
    \n
  • confidences: the predictions, as a dictionary whose keys are class labels and whose values are confidence probabilities
  • \n
\n\n

Step 3 \u2014 Creating a Gradio Interface

\n\n

Now that we have our predictive function set up, we can create a Gradio Interface around it.

\n\n

In this case, the input component is a drag-and-drop image component. To create this input, we can use the gr.Image class, which creates the component and handles the preprocessing to convert the uploaded image to a numpy array. We will instantiate the class with a parameter that automatically preprocesses the input image to be 224 pixels by 224 pixels, which is the size that MobileNet expects.

\n\n

The output component will be a \"label\", which displays the top labels in a nice form. Since we don't want to show all 1,000 class labels, we will customize it to show only the top 3 labels.

\n\n

Finally, we'll add one more parameter, the examples, which allows us to prepopulate our interfaces with a few predefined examples. The code for Gradio looks like this:

\n\n
import gradio as gr\n\ngr.Interface(fn=classify_image, \n             inputs=gr.Image(shape=(224, 224)),\n             outputs=gr.Label(num_top_classes=3),\n             examples=[\"banana.jpg\", \"car.jpg\"]).launch()\n
\n\n

This produces the following interface, which you can try right here in your browser (try uploading your own examples!):

\n\n\n\n
\n\n

And you're done! That's all the code you need to build a web demo for an image classifier. If you'd like to share with others, try setting share=True when you launch() the Interface!

\n", "tags": ["VISION", "MOBILENET", "TENSORFLOW"], "spaces": ["https://huggingface.co/spaces/abidlabs/keras-image-classifier"], "url": "/guides/image-classification-in-tensorflow/", "contributor": null}, {"name": "image-classification-with-vision-transformers", "category": "integrating-other-frameworks", "pretty_category": "Integrating Other Frameworks", "guide_index": null, "absolute_index": 22, "pretty_name": "Image Classification With Vision Transformers", "content": "# Image Classification with Vision Transformers\n\n\n\n\n## Introduction\n\nImage classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from facial recognition to manufacturing quality control. \n\nState-of-the-art image classifiers are based on the *transformers* architectures, originally popularized for NLP tasks. Such architectures are typically called vision transformers (ViT). Such models are perfect to use with Gradio's *image* input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in a **single line of Python**, and it will look like this (try one of the examples!):\n\n\n\n\nLet's get started!\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started).\n\n## Step 1 \u2014 Choosing a Vision Image Classification Model\n\nFirst, we will need an image classification model. For this tutorial, we will use a model from the [Hugging Face Model Hub](https://huggingface.co/models?pipeline_tag=image-classification). The Hub contains thousands of models covering dozens of different machine learning tasks. \n\nExpand the Tasks category on the left sidebar and select \"Image Classification\" as our task of interest. You will then see all of the models on the Hub that are designed to classify images.\n\nAt the time of writing, the most popular one is `google/vit-base-patch16-224`, which has been trained on ImageNet images at a resolution of 224x224 pixels. We will use this model for our demo. \n\n## Step 2 \u2014 Loading the Vision Transformer Model with Gradio\n\nWhen using a model from the Hugging Face Hub, we do not need to define the input or output components for the demo. Similarly, we do not need to be concerned with the details of preprocessing or postprocessing. \nAll of these are automatically inferred from the model tags.\n\nBesides the import statement, it only takes a single line of Python to load and launch the demo. \n\nWe use the `gr.Interface.load()` method and pass in the path to the model including the `huggingface/` to designate that it is from the Hugging Face Hub.\n\n```python\nimport gradio as gr\n\ngr.Interface.load(\n \"huggingface/google/vit-base-patch16-224\",\n examples=[\"alligator.jpg\", \"laptop.jpg\"]).launch()\n```\n\nNotice that we have added one more parameter, the `examples`, which allows us to prepopulate our interfaces with a few predefined examples. \n\nThis produces the following interface, which you can try right here in your browser. When you input an image, it is automatically preprocessed and sent to the Hugging Face Hub API, where it is passed through the model and returned as a human-interpretable prediction. Try uploading your own image!\n\n\n\n----------\n\nAnd you're done! In one line of code, you have built a web demo for an image classifier. 
If you'd like to share with others, try setting `share=True` when you `launch()` the Interface!\n\n", "html": "

Image Classification with Vision Transformers

\n\n

Introduction

\n\n

Image classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from facial recognition to manufacturing quality control.

\n\n

State-of-the-art image classifiers are based on the transformers architectures, originally popularized for NLP tasks. Such architectures are typically called vision transformers (ViT). Such models are perfect to use with Gradio's image input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in a single line of Python, and it will look like this (try one of the examples!):

\n\n\n\n

Let's get started!

\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed.

\n\n

Step 1 \u2014 Choosing a Vision Image Classification Model

\n\n

First, we will need an image classification model. For this tutorial, we will use a model from the Hugging Face Model Hub. The Hub contains thousands of models covering dozens of different machine learning tasks.

\n\n

Expand the Tasks category on the left sidebar and select \"Image Classification\" as our task of interest. You will then see all of the models on the Hub that are designed to classify images.

\n\n

At the time of writing, the most popular one is google/vit-base-patch16-224, which has been trained on ImageNet images at a resolution of 224x224 pixels. We will use this model for our demo.

\n\n

Step 2 \u2014 Loading the Vision Transformer Model with Gradio

\n\n

When using a model from the Hugging Face Hub, we do not need to define the input or output components for the demo. Similarly, we do not need to be concerned with the details of preprocessing or postprocessing. \nAll of these are automatically inferred from the model tags.

\n\n

Besides the import statement, it only takes a single line of Python to load and launch the demo.

\n\n

We use the gr.Interface.load() method and pass in the path to the model, including the huggingface/ prefix, to designate that it is from the Hugging Face Hub.

\n\n
import gradio as gr\n\ngr.Interface.load(\n             \"huggingface/google/vit-base-patch16-224\",\n             examples=[\"alligator.jpg\", \"laptop.jpg\"]).launch()\n
\n\n

Notice that we have added one more parameter, the examples, which allows us to prepopulate our interfaces with a few predefined examples.

\n\n

This produces the following interface, which you can try right here in your browser. When you input an image, it is automatically preprocessed and sent to the Hugging Face Hub API, where it is passed through the model and returned as a human-interpretable prediction. Try uploading your own image!

\n\n\n\n
\n\n

And you're done! In one line of code, you have built a web demo for an image classifier. If you'd like to share with others, try setting share=True when you launch() the Interface!

\n", "tags": ["VISION", "TRANSFORMERS", "HUB"], "spaces": ["https://huggingface.co/spaces/abidlabs/vision-transformer"], "url": "/guides/image-classification-with-vision-transformers/", "contributor": null}, {"name": "create-your-own-friends-with-a-gan", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 34, "pretty_name": "Create Your Own Friends With A Gan", "content": "# Create Your Own Friends with a GAN\n\n\n\n\n\n\n\n## Introduction\n\nIt seems that cryptocurrencies, [NFTs](https://www.nytimes.com/interactive/2022/03/18/technology/nft-guide.html), and the web3 movement are all the rage these days! Digital assets are being listed on marketplaces for astounding amounts of money, and just about every celebrity is debuting their own NFT collection. While your crypto assets [may be taxable, such as in Canada](https://www.canada.ca/en/revenue-agency/programs/about-canada-revenue-agency-cra/compliance/digital-currency/cryptocurrency-guide.html), today we'll explore some fun and tax-free ways to generate your own assortment of procedurally generated [CryptoPunks](https://www.larvalabs.com/cryptopunks).\n\nGenerative Adversarial Networks, often known just as *GANs*, are a specific class of deep-learning models that are designed to learn from an input dataset to create (*generate!*) new material that is convincingly similar to elements of the original training set. Famously, the website [thispersondoesnotexist.com](https://thispersondoesnotexist.com/) went viral with lifelike, yet synthetic, images of people generated with a model called StyleGAN2. GANs have gained traction in the machine learning world, and are now being used to generate all sorts of images, text, and even [music](https://salu133445.github.io/musegan/)!\n\nToday we'll briefly look at the high-level intuition behind GANs, and then we'll build a small demo around a pre-trained GAN to see what all the fuss is about. Here's a peek at what we're going to be putting together:\n\n\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). To use the pretrained model, also install `torch` and `torchvision`.\n\n## GANs: a very brief introduction\n\nOriginally proposed in [Goodfellow et al. 2014](https://arxiv.org/abs/1406.2661), GANs are made up of neural networks which compete with the intention of outsmarting each other. One network, known as the *generator*, is responsible for generating images. The other network, the *discriminator*, receives an image at a time from the generator along with a **real** image from the training data set. The discriminator then has to guess: which image is the fake?\n\nThe generator is constantly training to create images which are trickier for the discriminator to identify, while the discriminator raises the bar for the generator every time it correctly detects a fake. As the networks engage in this competitive (*adversarial!*) relationship, the images that get generated improve to the point where they become indistinguishable to human eyes!\n\nFor a more in-depth look at GANs, you can take a look at [this excellent post on Analytics Vidhya](https://www.analyticsvidhya.com/blog/2021/06/a-detailed-explanation-of-gan-with-implementation-using-tensorflow-and-keras/) or this [PyTorch tutorial](https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html). 
For now, though, we'll dive into a demo!\n\n## Step 1 \u2014 Create the Generator model\n\nTo generate new images with a GAN, you only need the generator model. There are many different architectures that the generator could use, but for this demo we'll use a pretrained GAN generator model with the following architecture:\n\n```python\nfrom torch import nn\n\nclass Generator(nn.Module):\n # Refer to the link below for explanations about nc, nz, and ngf\n # https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html#inputs\n def __init__(self, nc=4, nz=100, ngf=64):\n super(Generator, self).__init__()\n self.network = nn.Sequential(\n nn.ConvTranspose2d(nz, ngf * 4, 3, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 0, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n nn.Tanh(),\n )\n\n def forward(self, input):\n output = self.network(input)\n return output\n```\n\nWe're taking the generator from [this repo by @teddykoker](https://github.com/teddykoker/cryptopunks-gan/blob/main/train.py#L90), where you can also see the original discriminator model structure.\n\nAfter instantiating the model, we'll load in the weights from the Hugging Face Hub, stored at [nateraw/cryptopunks-gan](https://huggingface.co/nateraw/cryptopunks-gan):\n\n```python\nfrom huggingface_hub import hf_hub_download\nimport torch\n\nmodel = Generator()\nweights_path = hf_hub_download('nateraw/cryptopunks-gan', 'generator.pth')\nmodel.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))) # Use 'cuda' if you have a GPU available\n```\n\n## Step 2 \u2014 Defining a `predict` function\n\nThe `predict` function is the key to making Gradio work! Whatever inputs we choose through the Gradio interface will get passed through our `predict` function, which should operate on the inputs and generate outputs that we can display with Gradio output components. For GANs it's common to pass random noise into our model as the input, so we'll generate a tensor of random numbers and pass that through the model. We can then use `torchvision`'s `save_image` function to save the output of the model as a `png` file, and return the file name:\n\n```python\nfrom torchvision.utils import save_image\n\ndef predict(seed):\n num_punks = 4\n torch.manual_seed(seed)\n z = torch.randn(num_punks, 100, 1, 1)\n punks = model(z)\n save_image(punks, \"punks.png\", normalize=True)\n return 'punks.png'\n```\n\nWe're giving our `predict` function a `seed` parameter, so that we can fix the random tensor generation with a seed. We'll then be able to reproduce punks if we want to see them again by passing in the same seed.\n\n*Note!* Our model needs an input tensor of dimensions 100x1x1 to do a single inference, or (BatchSize)x100x1x1 for generating a batch of images. In this demo we'll start by generating 4 punks at a time.\n\n## Step 3 \u2014 Creating a Gradio interface\n\nAt this point you can even run the code you have with `predict()`, and you'll find your freshly generated punks in your file system at `./punks.png`. To make a truly interactive demo, though, we'll build out a simple interface with Gradio. 
Our goals here are to:\n\n* Set a slider input so users can choose the \"seed\" value\n* Use an image component for our output to showcase the generated punks\n* Use our `predict()` to take the seed and generate the images\n\nWith `gr.Interface()`, we can define all of that with a single function call:\n\n```python\nimport gradio as gr\n\ngr.Interface(\n predict,\n inputs=[\n gr.Slider(0, 1000, label='Seed', default=42),\n ],\n outputs=\"image\",\n).launch()\n```\n\nLaunching the interface should present you with something like this:\n\n\n\n## Step 4 \u2014 Even more punks!\n\nGenerating 4 punks at a time is a good start, but maybe we'd like to control how many we want to make each time. Adding more inputs to our Gradio interface is as simple as adding another item to the `inputs` list that we pass to `gr.Interface`:\n\n```python\ngr.Interface(\n predict,\n inputs=[\n gr.Slider(0, 1000, label='Seed', default=42),\n gr.Slider(4, 64, label='Number of Punks', step=1, default=10), # Adding another slider!\n ],\n outputs=\"image\",\n).launch()\n```\n\nThe new input will be passed to our `predict()` function, so we have to make some changes to that function to accept a new parameter:\n\n```python\ndef predict(seed, num_punks):\n torch.manual_seed(seed)\n z = torch.randn(num_punks, 100, 1, 1)\n punks = model(z)\n save_image(punks, \"punks.png\", normalize=True)\n return 'punks.png'\n```\n\nWhen you relaunch your interface, you should see a second slider that'll let you control the number of punks!\n\n## Step 5 - Polishing it up\n\nYour Gradio app is pretty much good to go, but you can add a few extra things to really make it ready for the spotlight \u2728\n\nWe can add some examples that users can easily try out by adding this to the `gr.Interface`:\n\n```python\ngr.Interface(\n # ...\n # keep everything as it is, and then add\n examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],\n).launch(cache_examples=True) # cache_examples is optional\n```\n\nThe `examples` parameter takes a list of lists, where each item in the sublists is ordered in the same order that we've listed the `inputs`. So in our case, `[seed, num_punks]`. Give it a try!\n\nYou can also try adding a `title`, `description`, and `article` to the `gr.Interface`. 
Each of those parameters accepts a string, so try it out and see what happens \ud83d\udc40 `article` will also accept HTML, as [explored in a previous guide](/guides/key-features/#descriptive-content)!\n\nWhen you're all done, you may end up with something like this:\n\n\n\nFor reference, here is our full code:\n\n```python\nimport torch\nfrom torch import nn\nfrom huggingface_hub import hf_hub_download\nfrom torchvision.utils import save_image\nimport gradio as gr\n\nclass Generator(nn.Module):\n # Refer to the link below for explanations about nc, nz, and ngf\n # https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html#inputs\n def __init__(self, nc=4, nz=100, ngf=64):\n super(Generator, self).__init__()\n self.network = nn.Sequential(\n nn.ConvTranspose2d(nz, ngf * 4, 3, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 0, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n nn.Tanh(),\n )\n\n def forward(self, input):\n output = self.network(input)\n return output\n\nmodel = Generator()\nweights_path = hf_hub_download('nateraw/cryptopunks-gan', 'generator.pth')\nmodel.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))) # Use 'cuda' if you have a GPU available\n\ndef predict(seed, num_punks):\n torch.manual_seed(seed)\n z = torch.randn(num_punks, 100, 1, 1)\n punks = model(z)\n save_image(punks, \"punks.png\", normalize=True)\n return 'punks.png'\n\ngr.Interface(\n predict,\n inputs=[\n gr.Slider(0, 1000, label='Seed', default=42),\n gr.Slider(4, 64, label='Number of Punks', step=1, default=10),\n ],\n outputs=\"image\",\n examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],\n).launch(cache_examples=True)\n```\n----------\n\nCongratulations! You've built out your very own GAN-powered CryptoPunks generator, with a fancy Gradio interface that makes it easy for anyone to use. Now you can [scour the Hub for more GANs](https://huggingface.co/models?other=gan) (or train your own) and continue making even more awesome demos \ud83e\udd17", "html": "

Create Your Own Friends with a GAN

\n\n

Introduction

\n\n

It seems that cryptocurrencies, NFTs, and the web3 movement are all the rage these days! Digital assets are being listed on marketplaces for astounding amounts of money, and just about every celebrity is debuting their own NFT collection. While your crypto assets may be taxable, such as in Canada, today we'll explore some fun and tax-free ways to generate your own assortment of procedurally generated CryptoPunks.

\n\n

Generative Adversarial Networks, often known just as GANs, are a specific class of deep-learning models that are designed to learn from an input dataset to create (generate!) new material that is convincingly similar to elements of the original training set. Famously, the website thispersondoesnotexist.com went viral with lifelike, yet synthetic, images of people generated with a model called StyleGAN2. GANs have gained traction in the machine learning world, and are now being used to generate all sorts of images, text, and even music!

\n\n

Today we'll briefly look at the high-level intuition behind GANs, and then we'll build a small demo around a pre-trained GAN to see what all the fuss is about. Here's a peek at what we're going to be putting together:

\n\n\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed. To use the pretrained model, also install torch and torchvision.

\n\n

GANs: a very brief introduction

\n\n

Originally proposed in Goodfellow et al. 2014, GANs are made up of neural networks which compete with the intention of outsmarting each other. One network, known as the generator, is responsible for generating images. The other network, the discriminator, receives an image at a time from the generator along with a real image from the training data set. The discriminator then has to guess: which image is the fake?

\n\n

The generator is constantly training to create images which are trickier for the discriminator to identify, while the discriminator raises the bar for the generator every time it correctly detects a fake. As the networks engage in this competitive (adversarial!) relationship, the images that get generated improve to the point where they become indistinguishable to human eyes!

\n\n

For a more in-depth look at GANs, you can take a look at this excellent post on Analytics Vidhya or this PyTorch tutorial. For now, though, we'll dive into a demo!

\n\n

Step 1 \u2014 Create the Generator model

\n\n

To generate new images with a GAN, you only need the generator model. There are many different architectures that the generator could use, but for this demo we'll use a pretrained GAN generator model with the following architecture:

\n\n
from torch import nn\n\nclass Generator(nn.Module):\n    # Refer to the link below for explanations about nc, nz, and ngf\n    # https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html#inputs\n    def __init__(self, nc=4, nz=100, ngf=64):\n        super(Generator, self).__init__()\n        self.network = nn.Sequential(\n            nn.ConvTranspose2d(nz, ngf * 4, 3, 1, 0, bias=False),\n            nn.BatchNorm2d(ngf * 4),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False),\n            nn.BatchNorm2d(ngf * 2),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 0, bias=False),\n            nn.BatchNorm2d(ngf),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n            nn.Tanh(),\n        )\n\n    def forward(self, input):\n        output = self.network(input)\n        return output\n
\n\n

We're taking the generator from this repo by @teddykoker, where you can also see the original discriminator model structure.

\n\n

After instantiating the model, we'll load in the weights from the Hugging Face Hub, stored at nateraw/cryptopunks-gan:

\n\n
from huggingface_hub import hf_hub_download\nimport torch\n\nmodel = Generator()\nweights_path = hf_hub_download('nateraw/cryptopunks-gan', 'generator.pth')\nmodel.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))) # Use 'cuda' if you have a GPU available\n
\n\n

Step 2 \u2014 Defining a predict function

\n\n

The predict function is the key to making Gradio work! Whatever inputs we choose through the Gradio interface will get passed through our predict function, which should operate on the inputs and generate outputs that we can display with Gradio output components. For GANs it's common to pass random noise into our model as the input, so we'll generate a tensor of random numbers and pass that through the model. We can then use torchvision's save_image function to save the output of the model as a png file, and return the file name:

\n\n
from torchvision.utils import save_image\n\ndef predict(seed):\n    num_punks = 4\n    torch.manual_seed(seed)\n    z = torch.randn(num_punks, 100, 1, 1)\n    punks = model(z)\n    save_image(punks, \"punks.png\", normalize=True)\n    return 'punks.png'\n
\n\n

We're giving our predict function a seed parameter, so that we can fix the random tensor generation with a seed. We'll then be able to reproduce punks if we want to see them again by passing in the same seed.

\n\n

Note! Our model needs an input tensor of dimensions 100x1x1 to do a single inference, or (BatchSize)x100x1x1 for generating a batch of images. In this demo we'll start by generating 4 punks at a time.

\n\n

Step 3 \u2014 Creating a Gradio interface

\n\n

At this point you can even run the code you have with predict(<SOME_NUMBER>), and you'll find your freshly generated punks in your file system at ./punks.png. To make a truly interactive demo, though, we'll build out a simple interface with Gradio. Our goals here are to:

\n\n
    \n
  • Set a slider input so users can choose the \"seed\" value
  • \n
  • Use an image component for our output to showcase the generated punks
  • \n
  • Use our predict() to take the seed and generate the images
  • \n
\n\n

With gr.Interface(), we can define all of that with a single function call:

\n\n
import gradio as gr\n\ngr.Interface(\n    predict,\n    inputs=[\n        gr.Slider(0, 1000, label='Seed', default=42),\n    ],\n    outputs=\"image\",\n).launch()\n
\n\n

Launching the interface should present you with something like this:

\n\n\n\n

Step 4 \u2014 Even more punks!

\n\n

Generating 4 punks at a time is a good start, but maybe we'd like to control how many we want to make each time. Adding more inputs to our Gradio interface is as simple as adding another item to the inputs list that we pass to gr.Interface:

\n\n
gr.Interface(\n    predict,\n    inputs=[\n        gr.Slider(0, 1000, label='Seed', default=42),\n        gr.Slider(4, 64, label='Number of Punks', step=1, default=10), # Adding another slider!\n    ],\n    outputs=\"image\",\n).launch()\n
\n\n

The new input will be passed to our predict() function, so we have to make some changes to that function to accept a new parameter:

\n\n
def predict(seed, num_punks):\n    torch.manual_seed(seed)\n    z = torch.randn(num_punks, 100, 1, 1)\n    punks = model(z)\n    save_image(punks, \"punks.png\", normalize=True)\n    return 'punks.png'\n
\n\n

When you relaunch your interface, you should see a second slider that'll let you control the number of punks!

\n\n

Step 5 - Polishing it up

\n\n

Your Gradio app is pretty much good to go, but you can add a few extra things to really make it ready for the spotlight \u2728

\n\n

We can add some examples that users can easily try out by adding this to the gr.Interface:

\n\n
gr.Interface(\n    # ...\n    # keep everything as it is, and then add\n    examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],\n).launch(cache_examples=True) # cache_examples is optional\n
\n\n

The examples parameter takes a list of lists, where each sublist contains values in the same order as the inputs we've listed. So in our case, [seed, num_punks]. Give it a try!

\n\n

You can also try adding a title, description, and article to the gr.Interface. Each of those parameters accepts a string, so try it out and see what happens \ud83d\udc40 article will also accept HTML, as explored in a previous guide!

\n\n

When you're all done, you may end up with something like this:

\n\n\n\n

For reference, here is our full code:

\n\n
import torch\nfrom torch import nn\nfrom huggingface_hub import hf_hub_download\nfrom torchvision.utils import save_image\nimport gradio as gr\n\nclass Generator(nn.Module):\n    # Refer to the link below for explanations about nc, nz, and ngf\n    # https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html#inputs\n    def __init__(self, nc=4, nz=100, ngf=64):\n        super(Generator, self).__init__()\n        self.network = nn.Sequential(\n            nn.ConvTranspose2d(nz, ngf * 4, 3, 1, 0, bias=False),\n            nn.BatchNorm2d(ngf * 4),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False),\n            nn.BatchNorm2d(ngf * 2),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 0, bias=False),\n            nn.BatchNorm2d(ngf),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n            nn.Tanh(),\n        )\n\n    def forward(self, input):\n        output = self.network(input)\n        return output\n\nmodel = Generator()\nweights_path = hf_hub_download('nateraw/cryptopunks-gan', 'generator.pth')\nmodel.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))) # Use 'cuda' if you have a GPU available\n\ndef predict(seed, num_punks):\n    torch.manual_seed(seed)\n    z = torch.randn(num_punks, 100, 1, 1)\n    punks = model(z)\n    save_image(punks, \"punks.png\", normalize=True)\n    return 'punks.png'\n\ngr.Interface(\n    predict,\n    inputs=[\n        gr.Slider(0, 1000, label='Seed', default=42),\n        gr.Slider(4, 64, label='Number of Punks', step=1, default=10),\n    ],\n    outputs=\"image\",\n    examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],\n).launch(cache_examples=True)\n
\n\n
\n\n

Congratulations! You've built out your very own GAN-powered CryptoPunks generator, with a fancy Gradio interface that makes it easy for anyone to use. Now you can scour the Hub for more GANs (or train your own) and continue making even more awesome demos \ud83e\udd17

\n", "tags": ["GAN", "IMAGE", "HUB"], "spaces": ["https://huggingface.co/spaces/NimaBoscarino/cryptopunks", "https://huggingface.co/spaces/nateraw/cryptopunks-generator"], "url": "/guides/create-your-own-friends-with-a-gan/", "contributor": "Nima Boscarino and Nate Raw"}], "parent": "gradio", "prev_obj": "load", "next_obj": "Progress"}, "progress": {"class": null, "name": "Progress", "description": "The Progress class provides a custom progress tracker that is used in a function signature. To attach a Progress tracker to a function, simply add a parameter right after the input parameters that has a default value set to a `gradio.Progress()` instance. The Progress tracker can then be updated in the function by calling the Progress object or using the `tqdm` method on an Iterable. The Progress tracker is currently only available with `queue()`.", "tags": {"demos": "progress"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "track_tqdm", "annotation": "bool", "doc": "If True, the Progress object will track any tqdm.tqdm iterations with the tqdm library in the function.", "default": "False"}], "returns": {"annotation": null}, "example": "import gradio as gr\nimport time\ndef my_function(x, progress=gr.Progress()):\n progress(0, desc=\"Starting...\")\n time.sleep(1)\n for i in progress.tqdm(range(100)):\n time.sleep(0.1)\n return x\ngr.Interface(my_function, gr.Textbox(), gr.Textbox()).queue().launch()", "fns": [{"fn": null, "name": "__call__", "description": "Updates progress tracker with progress and message text.", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "progress", "annotation": "float | tuple[int, int | None] | None", "doc": "If float, should be between 0 and 1 representing completion. If Tuple, first number represents steps completed, and second value represents total steps or None if unknown. 
If None, hides progress bar."}, {"name": "desc", "annotation": "str | None", "doc": "description to display.", "default": "None"}, {"name": "total", "annotation": "int | None", "doc": "estimated total number of steps.", "default": "None"}, {"name": "unit", "annotation": "str", "doc": "unit of iterations.", "default": "\"steps\""}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Progress"}, {"fn": null, "name": "tqdm", "description": "Attaches progress tracker to iterable, like tqdm.", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "iterable", "annotation": "Iterable | None", "doc": "iterable to attach progress tracker to."}, {"name": "desc", "annotation": "str | None", "doc": "description to display.", "default": "None"}, {"name": "total", "annotation": "int | None", "doc": "estimated total number of steps.", "default": "None"}, {"name": "unit", "annotation": "str", "doc": "unit of iterations.", "default": "\"steps\""}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Progress"}], "demos": [["progress", "import gradio as gr\nimport random\nimport time\nimport tqdm\nfrom datasets import load_dataset\nimport shutil\nfrom uuid import uuid4\n\nwith gr.Blocks() as demo:\n with gr.Row():\n text = gr.Textbox()\n textb = gr.Textbox()\n with gr.Row():\n load_set_btn = gr.Button(\"Load Set\")\n load_nested_set_btn = gr.Button(\"Load Nested Set\")\n load_random_btn = gr.Button(\"Load Random\")\n clean_imgs_btn = gr.Button(\"Clean Images\")\n wait_btn = gr.Button(\"Wait\")\n do_all_btn = gr.Button(\"Do All\")\n track_tqdm_btn = gr.Button(\"Bind TQDM\")\n bind_internal_tqdm_btn = gr.Button(\"Bind Internal TQDM\")\n\n text2 = gr.Textbox()\n\n # track list\n def load_set(text, text2, progress=gr.Progress()):\n imgs = [None] * 24\n for img in progress.tqdm(imgs, desc=\"Loading from list\"):\n time.sleep(0.1)\n return \"done\"\n load_set_btn.click(load_set, [text, textb], text2)\n\n # track nested list\n def load_nested_set(text, text2, progress=gr.Progress()):\n imgs = [[None] * 8] * 3\n for img_set in progress.tqdm(imgs, desc=\"Nested list\"):\n time.sleep(2)\n for img in progress.tqdm(img_set, desc=\"inner list\"):\n time.sleep(0.1)\n return \"done\"\n load_nested_set_btn.click(load_nested_set, [text, textb], text2)\n\n # track iterable of unknown length\n def load_random(data, progress=gr.Progress()):\n def yielder():\n for i in range(0, random.randint(15, 20)):\n time.sleep(0.1)\n yield None\n for img in progress.tqdm(yielder()):\n pass\n return \"done\"\n load_random_btn.click(load_random, {text, textb}, text2)\n \n # manual progress\n def clean_imgs(text, progress=gr.Progress()):\n progress(0.2, desc=\"Collecting Images\")\n time.sleep(1)\n progress(0.5, desc=\"Cleaning Images\")\n time.sleep(1.5)\n progress(0.8, desc=\"Sending Images\")\n time.sleep(1.5)\n return \"done\"\n clean_imgs_btn.click(clean_imgs, text, text2)\n\n # no progress\n def wait(text):\n time.sleep(4)\n return \"done\"\n wait_btn.click(wait, text, text2)\n\n # multiple progressions\n def do_all(data, progress=gr.Progress()):\n load_set(data[text], data[textb], progress)\n load_random(data, progress)\n clean_imgs(data[text], progress)\n progress(None)\n wait(text)\n return \"done\"\n do_all_btn.click(do_all, {text, textb}, text2)\n\n def track_tqdm(data, progress=gr.Progress(track_tqdm=True)):\n for i in tqdm.tqdm(range(5), desc=\"outer\"):\n for j in tqdm.tqdm(range(4), desc=\"inner\"):\n time.sleep(1)\n return \"done\"\n 
track_tqdm_btn.click(track_tqdm, {text, textb}, text2)\n\n def bind_internal_tqdm(data, progress=gr.Progress(track_tqdm=True)):\n outdir = \"__tmp/\" + str(uuid4())\n load_dataset(\"beans\", split=\"train\", cache_dir=outdir)\n shutil.rmtree(outdir)\n return \"done\"\n bind_internal_tqdm_btn.click(bind_internal_tqdm, {text, textb}, text2)\n\n\nif __name__ == \"__main__\":\n demo.queue(concurrency_count=20).launch()\n"]], "parent": "gradio", "prev_obj": "Examples", "next_obj": "update"}, "update": {"class": null, "name": "update", "description": "Updates component properties. When a function passed into a Gradio Interface or a Blocks events returns a typical value, it updates the value of the output component. But it is also possible to update the properties of an output component (such as the number of lines of a `Textbox` or the visibility of an `Image`) by returning the component's `update()` function, which takes as parameters any of the constructor parameters for that component. This is a shorthand for using the update method on a component. For example, rather than using gr.Number.update(...) you can just use gr.update(...). Note that your editor's autocompletion will suggest proper parameters if you use the update method on the component.
", "tags": {"demos": "blocks_essay, blocks_update, blocks_essay_update", "parameters": "kwargs: Key-word arguments used to update the component's properties."}, "parameters": [{"name": "kwargs", "annotation": "", "doc": "Key-word arguments used to update the component's properties."}], "returns": {"annotation": null}, "example": "# Blocks Example\nimport gradio as gr\nwith gr.Blocks() as demo:\n radio = gr.Radio([1, 2, 4], label=\"Set the value of the number\")\n number = gr.Number(value=2, interactive=True)\n radio.change(fn=lambda value: gr.update(value=value), inputs=radio, outputs=number)\ndemo.launch()\n\n# Interface example\nimport gradio as gr\ndef change_textbox(choice):\n if choice == \"short\":\n return gr.Textbox.update(lines=2, visible=True)\n elif choice == \"long\":\n return gr.Textbox.update(lines=8, visible=True)\n else:\n return gr.Textbox.update(visible=False)\ngr.Interface(\n change_textbox,\n gr.Radio(\n [\"short\", \"long\", \"none\"], label=\"What kind of essay would you like to write?\"\n ),\n gr.Textbox(lines=2),\n live=True,\n).launch()", "fns": [], "demos": [["blocks_essay", "import gradio as gr\n\n\ndef change_textbox(choice):\n if choice == \"short\":\n return gr.Textbox.update(lines=2, visible=True)\n elif choice == \"long\":\n return gr.Textbox.update(lines=8, visible=True)\n else:\n return gr.Textbox.update(visible=False)\n\n\nwith gr.Blocks() as demo:\n radio = gr.Radio(\n [\"short\", \"long\", \"none\"], label=\"What kind of essay would you like to write?\"\n )\n text = gr.Textbox(lines=2, interactive=True).style(show_copy_button=True)\n\n radio.change(fn=change_textbox, inputs=radio, outputs=text)\n\n\nif __name__ == \"__main__\":\n demo.launch()\n"], ["blocks_update", "import gradio as gr\n\nwith gr.Blocks() as demo:\n gr.Markdown(\n \"\"\"\n # Animal Generator\n Once you select a species, the detail panel should be visible.\n \"\"\"\n )\n\n species = gr.Radio(label=\"Animal Class\", choices=[\"Mammal\", \"Fish\", \"Bird\"])\n animal = gr.Dropdown(label=\"Animal\", choices=[])\n\n with gr.Column(visible=False) as details_col:\n weight = gr.Slider(0, 20)\n details = gr.Textbox(label=\"Extra Details\")\n generate_btn = gr.Button(\"Generate\")\n output = gr.Textbox(label=\"Output\")\n\n species_map = {\n \"Mammal\": [\"Elephant\", \"Giraffe\", \"Hamster\"],\n \"Fish\": [\"Shark\", \"Salmon\", \"Tuna\"],\n \"Bird\": [\"Chicken\", \"Eagle\", \"Hawk\"],\n }\n\n def filter_species(species):\n return gr.Dropdown.update(\n choices=species_map[species], value=species_map[species][1]\n ), gr.update(visible=True)\n\n species.change(filter_species, species, [animal, details_col])\n\n def filter_weight(animal):\n if animal in (\"Elephant\", \"Shark\", \"Giraffe\"):\n return gr.update(maximum=100)\n else:\n return gr.update(maximum=20)\n\n animal.change(filter_weight, animal, weight)\n weight.change(lambda w: gr.update(lines=int(w / 10) + 1), weight, details)\n\n generate_btn.click(lambda x: x, details, output)\n\n\nif __name__ == \"__main__\":\n demo.launch()"], ["blocks_essay_update", "import gradio as gr\n\ndef change_textbox(choice):\n if choice == \"short\":\n return gr.update(lines=2, visible=True, value=\"Short story: \")\n elif choice == \"long\":\n return gr.update(lines=8, visible=True, value=\"Long story...\")\n else:\n return gr.update(visible=False)\n\nwith gr.Blocks() as demo:\n radio = gr.Radio(\n [\"short\", \"long\", \"none\"], label=\"Essay Length to Write?\"\n )\n text = gr.Textbox(lines=2, interactive=True)\n radio.change(fn=change_textbox, 
inputs=radio, outputs=text)\n\nif __name__ == \"__main__\":\n demo.launch()"]], "parent": "gradio", "prev_obj": "Progress", "next_obj": "make_waveform"}, "make_waveform": {"class": null, "name": "make_waveform", "description": "Generates a waveform video from an audio file. Useful for creating an easy to share audio visualization. The output should be passed into a `gr.Video` component.", "tags": {"parameters": "audio: Audio file path or tuple of (sample_rate, audio_data)
bg_color: Background color of waveform (ignored if bg_image is provided)
bg_image: Background image of waveform
fg_alpha: Opacity of foreground waveform
bars_color: Color of waveform bars. Can be a single color or a tuple of (start_color, end_color) of gradient
bar_count: Number of bars in waveform
bar_width: Width of bars in waveform. 1 represents full width, 0.5 represents half width, etc.
animate: If true, the audio waveform overlay will be animated, if false, it will be static.", "returns": "A filepath to the output video in mp4 format."}, "parameters": [{"name": "audio", "annotation": "str | tuple[int, np.ndarray]", "doc": "Audio file path or tuple of (sample_rate, audio_data)"}, {"name": "bg_color", "annotation": "str", "doc": "Background color of waveform (ignored if bg_image is provided)", "default": "\"#f3f4f6\""}, {"name": "bg_image", "annotation": "str | None", "doc": "Background image of waveform", "default": "None"}, {"name": "fg_alpha", "annotation": "float", "doc": "Opacity of foreground waveform", "default": "0.75"}, {"name": "bars_color", "annotation": "str | tuple[str, str]", "doc": "Color of waveform bars. Can be a single color or a tuple of (start_color, end_color) of gradient", "default": "('#fbbf24', '#ea580c')"}, {"name": "bar_count", "annotation": "int", "doc": "Number of bars in waveform", "default": "50"}, {"name": "bar_width", "annotation": "float", "doc": "Width of bars in waveform. 1 represents full width, 0.5 represents half width, etc.", "default": "0.6"}, {"name": "animate", "annotation": "bool", "doc": "If true, the audio waveform overlay will be animated, if false, it will be static.", "default": "False"}], "returns": {"annotation": null, "doc": "A filepath to the output video in mp4 format."}, "example": null, "fns": [], "parent": "gradio", "prev_obj": "update", "next_obj": "EventData"}, "eventdata": {"class": null, "name": "EventData", "description": "When a subclass of EventData is added as a type hint to an argument of an event listener method, this object will be passed as that argument. It contains information about the event that triggered the listener, such the target object, and other data related to the specific event that are attributes of the subclass.
", "tags": {"demos": "gallery_selections, tictactoe"}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "target", "annotation": "Block | None", "doc": "The target object that triggered the event. Can be used to distinguish if multiple components are bound to the same listener."}], "returns": {"annotation": null}, "example": "table = gr.Dataframe([[1, 2, 3], [4, 5, 6]])\ngallery = gr.Gallery([(\"cat.jpg\", \"Cat\"), (\"dog.jpg\", \"Dog\")])\ntextbox = gr.Textbox(\"Hello World!\")\n\nstatement = gr.Textbox()\n\ndef on_select(evt: gr.SelectData): # SelectData is a subclass of EventData\n return f\"You selected {evt.value} at {evt.index} from {evt.target}\"\n\ntable.select(on_select, None, statement)\ngallery.select(on_select, None, statement)\ntextbox.select(on_select, None, statement)", "fns": [], "demos": [["gallery_selections", "import gradio as gr\nimport numpy as np\n\nwith gr.Blocks() as demo:\n imgs = gr.State()\n gallery = gr.Gallery()\n\n def generate_images():\n images = []\n for _ in range(9):\n image = np.ones((100, 100, 3), dtype=np.uint8) * np.random.randint(\n 0, 255, 3\n ) # image is a solid single color\n images.append(image)\n return images, images\n\n demo.load(generate_images, None, [gallery, imgs])\n\n with gr.Row():\n selected = gr.Number(show_label=False, placeholder=\"Selected\")\n darken_btn = gr.Button(\"Darken selected\")\n\n def get_select_index(evt: gr.SelectData):\n return evt.index\n\n gallery.select(get_select_index, None, selected)\n\n def darken_img(imgs, index):\n index = int(index)\n imgs[index] = np.round(imgs[index] * 0.8).astype(np.uint8)\n return imgs, imgs\n\n darken_btn.click(darken_img, [imgs, selected], [imgs, gallery])\n\nif __name__ == \"__main__\":\n demo.launch()\n"], ["tictactoe", "import gradio as gr\n\nwith gr.Blocks() as demo:\n turn = gr.Textbox(\"X\", interactive=False, label=\"Turn\")\n board = gr.Dataframe(value=[[\"\", \"\", \"\"]] * 3, interactive=False, type=\"array\")\n\n def place(board, turn, evt: gr.SelectData):\n if evt.value:\n return board, turn\n board[evt.index[0]][evt.index[1]] = turn\n turn = \"O\" if turn == \"X\" else \"X\"\n return board, turn\n\n board.select(place, [board, turn], [board, turn])\n\nif __name__ == \"__main__\":\n demo.launch()"]], "parent": "gradio", "prev_obj": "make_waveform", "next_obj": "Warning"}, "warning": {"class": null, "name": "Warning", "description": "This function allows you to pass custom warning messages to the user. 
You can do so simply with `gr.Warning('message here')`, and when that line is executed the custom message will appear in a modal on the demo.", "tags": {"parameters": "message: The warning message to be displayed to the user."}, "parameters": [{"name": "message", "annotation": "str", "doc": "The warning message to be displayed to the user.", "default": "\"Warning issued.\""}], "returns": {"annotation": null}, "example": null, "fns": [], "parent": "gradio", "prev_obj": "EventData", "next_obj": "Info"}, "info": {"class": null, "name": "Info", "description": "", "tags": {"parameters": "message: The info message to be displayed to the user."}, "parameters": [{"name": "message", "annotation": "str", "doc": "The info message to be displayed to the user.", "default": "\"Info issued.\""}], "returns": {"annotation": null}, "example": null, "fns": [], "parent": "gradio", "prev_obj": "Warning", "next_obj": "Request"}}, "routes": {"request": {"class": null, "name": "Request", "description": "A Gradio request object that can be used to access the request headers, cookies, query parameters and other information about the request from within the prediction function. The class is a thin wrapper around the fastapi.Request class. Attributes of this class include: `headers`, `client`, `query_params`, and `path_params`. If auth is enabled, the `username` attribute can be used to get the logged in user.", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "request", "annotation": "fastapi.Request | None", "doc": "A fastapi.Request", "default": "None"}, {"name": "username", "annotation": "str | None", "doc": null, "default": "None"}], "returns": {"annotation": null}, "example": "import gradio as gr\ndef echo(name, request: gr.Request):\n print(\"Request headers dictionary:\", request.headers)\n print(\"IP address:\", request.client.host)\n return name\nio = gr.Interface(echo, \"textbox\", \"textbox\").launch()", "fns": [], "parent": "gradio", "prev_obj": "make_waveform", "next_obj": "mount_gradio_app"}, "mount_gradio_app": {"class": null, "name": "mount_gradio_app", "description": "Mount a gradio.Blocks to an existing FastAPI application.
", "tags": {"parameters": "app: The parent FastAPI application.
blocks: The blocks object we want to mount to the parent app.
path: The path at which the gradio application will be mounted.
gradio_api_url: The full url at which the gradio app will run. This is only needed if deploying to Hugging Face Spaces or if the websocket endpoints of your deployed app are on a different network location than the gradio app. If deploying to Spaces, set gradio_api_url to 'http://localhost:7860/'
app_kwargs: Additional keyword arguments to pass to the underlying FastAPI app as a dictionary of parameter keys and argument values. For example, `{\"docs_url\": \"/docs\"}`"}, "parameters": [{"name": "app", "annotation": "fastapi.FastAPI", "doc": "The parent FastAPI application."}, {"name": "blocks", "annotation": "gradio.Blocks", "doc": "The blocks object we want to mount to the parent app."}, {"name": "path", "annotation": "str", "doc": "The path at which the gradio application will be mounted."}, {"name": "gradio_api_url", "annotation": "str | None", "doc": "The full url at which the gradio app will run. This is only needed if deploying to Huggingface spaces of if the websocket endpoints of your deployed app are on a different network location than the gradio app. If deploying to spaces, set gradio_api_url to 'http://localhost:7860/'", "default": "None"}, {"name": "app_kwargs", "annotation": "dict[str, Any] | None", "doc": "Additional keyword arguments to pass to the underlying FastAPI app as a dictionary of parameter keys and argument values. For example, `{\"docs_url\": \"/docs\"}`", "default": "None"}], "returns": {"annotation": null}, "example": "from fastapi import FastAPI\nimport gradio as gr\napp = FastAPI()\n@app.get(\"/\")\ndef read_main():\n return {\"message\": \"This is your main app\"}\nio = gr.Interface(lambda x: \"Hello, \" + x + \"!\", \"textbox\", \"textbox\")\napp = gr.mount_gradio_app(app, io, path=\"/gradio\")\n# Then run `uvicorn run:app` from the terminal and navigate to http://localhost:8000/gradio.", "fns": [], "parent": "gradio", "prev_obj": "Request", "next_obj": "Python-Client"}}, "events": ["change", "input", "click", "submit", "edit", "clear", "play", "pause", "stop", "end", "stream", "start_recording", "stop_recording", "focus", "blur", "upload", "release", "select"], "py-client": {"client": {"class": null, "name": "Client", "description": "The main Client class for the Python client. This class is used to connect to a remote Gradio app and call its API endpoints.
", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "src", "annotation": "str", "doc": "Either the name of the Hugging Face Space to load, (e.g. \"abidlabs/whisper-large-v2\") or the full URL (including \"http\" or \"https\") of the hosted Gradio app to load (e.g. \"http://mydomain.com/app\" or \"https://bec81a83-5b5c-471e.gradio.live/\")."}, {"name": "hf_token", "annotation": "str | None", "doc": "The Hugging Face token to use to access private Spaces. Automatically fetched if you are logged in via the Hugging Face Hub CLI. Obtain from: https://huggingface.co/settings/token", "default": "None"}, {"name": "max_workers", "annotation": "int", "doc": "The maximum number of thread workers that can be used to make requests to the remote Gradio app simultaneously.", "default": "40"}, {"name": "serialize", "annotation": "bool", "doc": "Whether the client should serialize the inputs and deserialize the outputs of the remote API. If set to False, the client will pass the inputs and outputs as-is, without serializing/deserializing them. E.g. you if you set this to False, you'd submit an image in base64 format instead of a filepath, and you'd get back an image in base64 format from the remote API instead of a filepath.", "default": "True"}, {"name": "output_dir", "annotation": "str | Path | None", "doc": "The directory to save files that are downloaded from the remote API. If None, reads from the GRADIO_TEMP_DIR environment variable. Defaults to a temporary directory on your machine.", "default": "\"/var/folders/lt/_bbyb3m10xbb9cpp4x7qs2rc0000gn/T/gradio\""}, {"name": "verbose", "annotation": "bool", "doc": "Whether the client should print statements to the console.", "default": "True"}], "returns": {"annotation": null}, "example": "from gradio_client import Client\n\nclient = Client(\"abidlabs/whisper-large-v2\") # connecting to a Hugging Face Space\nclient.predict(\"test.mp4\", api_name=\"/predict\")\n>> What a nice recording! # returns the result of the remote API call\n\nclient = Client(\"https://bec81a83-5b5c-471e.gradio.live\") # connecting to a temporary Gradio share URL\njob = client.submit(\"hello\", api_name=\"/predict\") # runs the prediction in a background thread\njob.result()\n>> 49 # returns the result of the remote API call (blocking call)", "fns": [{"fn": null, "name": "predict", "description": "Calls the Gradio API and returns the result (this is a blocking call).
", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "args", "annotation": "", "doc": "The arguments to pass to the remote API. The order of the arguments must match the order of the inputs in the Gradio app."}, {"name": "api_name", "annotation": "str | None", "doc": "The name of the API endpoint to call starting with a leading slash, e.g. \"/predict\". Does not need to be provided if the Gradio app has only one named API endpoint.", "default": "None"}, {"name": "fn_index", "annotation": "int | None", "doc": "As an alternative to api_name, this parameter takes the index of the API endpoint to call, e.g. 0. Both api_name and fn_index can be provided, but if they conflict, api_name will take precedence.", "default": "None"}], "returns": {"annotation": "Any", "doc": "The result of the API call. Will be a Tuple if the API has multiple outputs."}, "example": "from gradio_client import Client\nclient = Client(src=\"gradio/calculator\")\nclient.predict(5, \"add\", 4, api_name=\"/predict\")\n>> 9.0", "override_signature": null, "parent": "gradio.Client"}, {"fn": null, "name": "submit", "description": "Creates and returns a Job object which calls the Gradio API in a background thread. The job can be used to retrieve the status and result of the remote API call.
", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "args", "annotation": "", "doc": "The arguments to pass to the remote API. The order of the arguments must match the order of the inputs in the Gradio app."}, {"name": "api_name", "annotation": "str | None", "doc": "The name of the API endpoint to call starting with a leading slash, e.g. \"/predict\". Does not need to be provided if the Gradio app has only one named API endpoint.", "default": "None"}, {"name": "fn_index", "annotation": "int | None", "doc": "As an alternative to api_name, this parameter takes the index of the API endpoint to call, e.g. 0. Both api_name and fn_index can be provided, but if they conflict, api_name will take precedence.", "default": "None"}, {"name": "result_callbacks", "annotation": "Callable | list[Callable] | None", "doc": "A callback function, or list of callback functions, to be called when the result is ready. If a list of functions is provided, they will be called in order. The return values from the remote API are provided as separate parameters into the callback. If None, no callback will be called.", "default": "None"}], "returns": {"annotation": "Job", "doc": "A Job object that can be used to retrieve the status and result of the remote API call."}, "example": "from gradio_client import Client\nclient = Client(src=\"gradio/calculator\")\njob = client.submit(5, \"add\", 4, api_name=\"/predict\")\njob.status()\n>> \njob.result() # blocking call\n>> 9.0", "override_signature": null, "parent": "gradio.Client"}, {"fn": null, "name": "view_api", "description": "Prints the usage info for the API. If the Gradio app has multiple API endpoints, the usage info for each endpoint will be printed separately. If return_format=\"dict\" the info is returned in dictionary format, as shown in the example below.
", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "all_endpoints", "annotation": "bool | None", "doc": "If True, prints information for both named and unnamed endpoints in the Gradio app. If False, will only print info about named endpoints. If None (default), will print info about named endpoints, unless there aren't any -- in which it will print info about unnamed endpoints.", "default": "None"}, {"name": "print_info", "annotation": "bool", "doc": "If True, prints the usage info to the console. If False, does not print the usage info.", "default": "True"}, {"name": "return_format", "annotation": "Literal['dict', 'str'] | None", "doc": "If None, nothing is returned. If \"str\", returns the same string that would be printed to the console. If \"dict\", returns the usage info as a dictionary that can be programmatically parsed, and *all endpoints are returned in the dictionary* regardless of the value of `all_endpoints`. The format of the dictionary is in the docstring of this method.", "default": "None"}], "returns": {}, "example": "from gradio_client import Client\nclient = Client(src=\"gradio/calculator\")\nclient.view_api(return_format=\"dict\")\n>> {\n 'named_endpoints': {\n '/predict': {\n 'parameters': [\n {\n 'label': 'num1',\n 'type_python': 'int | float',\n 'type_description': 'numeric value',\n 'component': 'Number',\n 'example_input': '5'\n },\n {\n 'label': 'operation',\n 'type_python': 'str',\n 'type_description': 'string value',\n 'component': 'Radio',\n 'example_input': 'add'\n },\n {\n 'label': 'num2',\n 'type_python': 'int | float',\n 'type_description': 'numeric value',\n 'component': 'Number',\n 'example_input': '5'\n },\n ],\n 'returns': [\n {\n 'label': 'output',\n 'type_python': 'int | float',\n 'type_description': 'numeric value',\n 'component': 'Number',\n },\n ]\n },\n '/flag': {\n 'parameters': [\n ...\n ],\n 'returns': [\n ...\n ]\n }\n }\n 'unnamed_endpoints': {\n 2: {\n 'parameters': [\n ...\n ],\n 'returns': [\n ...\n ]\n }\n }\n }\n}", "override_signature": null, "parent": "gradio.Client"}, {"fn": null, "name": "duplicate", "description": "Duplicates a Hugging Face Space under your account and returns a Client object for the new Space. No duplication is created if the Space already exists in your account (to override this, provide a new name for the new Space using `to_id`). To use this method, you must provide an `hf_token` or be logged in via the Hugging Face Hub CLI.
The new Space will be private by default and use the same hardware as the original Space. This can be changed by using the `private` and `hardware` parameters. For hardware upgrades (beyond the basic CPU tier), you may be required to provide billing information on Hugging Face: https://huggingface.co/settings/billing
", "tags": {}, "parameters": [{"name": "from_id", "annotation": "str", "doc": "The name of the Hugging Face Space to duplicate in the format \"{username}/{space_id}\", e.g. \"gradio/whisper\"."}, {"name": "to_id", "annotation": "str | None", "doc": "The name of the new Hugging Face Space to create, e.g. \"abidlabs/whisper-duplicate\". If not provided, the new Space will be named \"{your_HF_username}/{space_id}\".", "default": "None"}, {"name": "hf_token", "annotation": "str | None", "doc": "The Hugging Face token to use to access private Spaces. Automatically fetched if you are logged in via the Hugging Face Hub CLI. Obtain from: https://huggingface.co/settings/token", "default": "None"}, {"name": "private", "annotation": "bool", "doc": "Whether the new Space should be private (True) or public (False). Defaults to True.", "default": "True"}, {"name": "hardware", "annotation": "Literal['cpu-basic', 'cpu-upgrade', 't4-small', 't4-medium', 'a10g-small', 'a10g-large', 'a100-large'] | SpaceHardware | None", "doc": "The hardware tier to use for the new Space. Defaults to the same hardware tier as the original Space. Options include \"cpu-basic\", \"cpu-upgrade\", \"t4-small\", \"t4-medium\", \"a10g-small\", \"a10g-large\", \"a100-large\", subject to availability.", "default": "None"}, {"name": "secrets", "annotation": "dict[str, str] | None", "doc": "A dictionary of (secret key, secret value) to pass to the new Space. Defaults to None. Secrets are only used when the Space is duplicated for the first time, and are not updated if the duplicated Space already exists.", "default": "None"}, {"name": "sleep_timeout", "annotation": "int", "doc": "The number of minutes after which the duplicate Space will be paused if no requests are made to it (to minimize billing charges). Defaults to 5 minutes.", "default": "5"}, {"name": "max_workers", "annotation": "int", "doc": "The maximum number of thread workers that can be used to make requests to the remote Gradio app simultaneously.", "default": "40"}, {"name": "verbose", "annotation": "bool", "doc": "Whether the client should print statements to the console.", "default": "True"}], "returns": {}, "example": "import os\nfrom gradio_client import Client\nHF_TOKEN = os.environ.get(\"HF_TOKEN\")\nclient = Client.duplicate(\"abidlabs/whisper\", hf_token=HF_TOKEN)\nclient.predict(\"audio_sample.wav\")\n>> \"This is a test of the whisper speech recognition model.\"", "override_signature": null, "parent": "gradio.Client"}, {"fn": null, "name": "deploy_discord", "description": "Deploy the upstream app as a discord bot. Currently only supports gr.ChatInterface.", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "discord_bot_token", "annotation": "str | None", "doc": "This is the \"password\" needed to be able to launch the bot. Users can get a token by creating a bot app on the discord website. If you run the method without specifying a token, the space will explain how to get one. See here: https://huggingface.co/spaces/freddyaboulton/test-discord-bot-v1.", "default": "None"}, {"name": "api_names", "annotation": "list[str | tuple[str, str]] | None", "doc": "The api_names of the app to turn into bot commands. This parameter currently has no effect as ChatInterface only has one api_name ('/chat').", "default": "None"}, {"name": "to_id", "annotation": "str | None", "doc": "The name of the space hosting the discord bot. 
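For illustration only (the Space name, secret name, and values below are placeholders rather than part of the documented example), a duplication call that also pins the hardware tier, the sleep timeout, and a secret might look like this sketch:

```python
from gradio_client import Client

# Hypothetical values; the duplicate() parameters used here are described below.
client = Client.duplicate(
    "abidlabs/whisper",              # Space to copy
    hardware="t4-small",             # one of the hardware tiers listed below
    sleep_timeout=30,                # minutes of inactivity before the copy is paused
    secrets={"MY_SECRET": "..."},    # only applied the first time the Space is duplicated
)
client.predict("audio_sample.wav")
```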
If None, the name will be gradio-discord-bot-{random-substring}", "default": "None"}, {"name": "hf_token", "annotation": "str | None", "doc": "HF API token with write privileges in order to upload the files to HF space. Can be omitted if logged in via the HuggingFace CLI, unless the upstream space is private. Obtain from: https://huggingface.co/settings/token", "default": "None"}, {"name": "private", "annotation": "bool", "doc": "Whether the space hosting the discord bot is private. The visibility of the discord bot itself is set via the discord website. See https://huggingface.co/spaces/freddyaboulton/test-discord-bot-v1", "default": "False"}], "returns": {}, "example": null, "override_signature": null, "parent": "gradio.Client"}], "parent": "gradio", "prev_obj": "Python-Client", "next_obj": "Job"}, "job": {"class": null, "name": "Job", "description": "A Job is a wrapper over the Future class that represents a prediction call that has been submitted by the Gradio client. This class is not meant to be instantiated directly, but rather is created by the Client.submit() method.
A Job object includes methods to get the status of the prediction call, as well as to get the outputs of the prediction call. Job objects are also iterable, and can be used in a loop to get the outputs of prediction calls as they become available for generator endpoints.", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "future", "annotation": "Future", "doc": "The future object that represents the prediction call, created by the Client.submit() method"}, {"name": "communicator", "annotation": "Communicator | None", "doc": "The communicator object that is used to communicate between the client and the background thread running the job", "default": "None"}, {"name": "verbose", "annotation": "bool", "doc": "Whether to print any status-related messages to the console", "default": "True"}, {"name": "space_id", "annotation": "str | None", "doc": "The space ID corresponding to the Client object that created this Job object", "default": "None"}], "returns": {"annotation": null}, "example": null, "fns": [{"fn": null, "name": "result", "description": "Return the result of the call that the future represents. Raises CancelledError if the future was cancelled, TimeoutError if the future didn't finish executing before the given timeout, and, if the call raised an exception, re-raises that exception.
", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}, {"name": "timeout", "annotation": "float | None", "doc": "The number of seconds to wait for the result if the future isn't done. If None, then there is no limit on the wait time.", "default": "None"}], "returns": {"annotation": "Any", "doc": "The result of the call that the future represents."}, "example": "from gradio_client import Client\ncalculator = Client(src=\"gradio/calculator\")\njob = calculator.submit(\"foo\", \"add\", 4, fn_index=0)\njob.result(timeout=5)\n>> 9", "override_signature": null, "parent": "gradio.Job"}, {"fn": null, "name": "outputs", "description": "Returns a list containing the latest outputs from the Job.
If the endpoint has multiple output components, the list will contain a tuple of results. Otherwise, it will contain the results without storing them in tuples.
For endpoints that are queued, this list will contain the final job output even if that endpoint does not use a generator function.
", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}], "returns": {}, "example": "from gradio_client import Client\nclient = Client(src=\"gradio/count_generator\")\njob = client.submit(3, api_name=\"/count\")\nwhile not job.done():\n time.sleep(0.1)\njob.outputs()\n>> ['0', '1', '2']", "override_signature": null, "parent": "gradio.Job"}, {"fn": null, "name": "status", "description": "Returns the latest status update from the Job in the form of a StatusUpdate object, which contains the following fields: code, rank, queue_size, success, time, eta, and progress_data.
progress_data is a list of updates emitted by the gr.Progress() tracker of the event handler. Each element of the list has the following fields: index, length, unit, progress, desc. If the event handler does not have a gr.Progress() tracker, the progress_data field will be None.
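For illustration, a caller might poll these fields while a job runs. This is a hedged sketch reusing the Space and endpoint from the outputs() example above, not an additional documented API:

```python
import time
from gradio_client import Client

client = Client("gradio/count_generator")
job = client.submit(3, api_name="/count")

while not job.done():
    status = job.status()
    print(status.code, status.eta)       # fields described above
    if status.progress_data:             # None unless the event handler uses gr.Progress()
        for update in status.progress_data:
            print(update)                # each update carries index, length, unit, progress, desc
    time.sleep(0.5)
```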
", "tags": {}, "parameters": [{"name": "self", "annotation": "", "doc": null}], "returns": {}, "example": "from gradio_client import Client\nclient = Client(src=\"gradio/calculator\")\njob = client.submit(5, \"add\", 4, api_name=\"/predict\")\njob.status()\n>> \njob.status().eta\n>> 43.241 # seconds", "override_signature": null, "parent": "gradio.Job"}], "parent": "gradio", "prev_obj": "Client", "next_obj": "JS-Client"}}, "events_matrix": {"AnnotatedImage": ["select"], "Audio": ["change", "clear", "play", "pause", "stop", "end", "stream", "start_recording", "stop_recording", "upload"], "BarPlot": ["change", "clear"], "Button": ["click"], "Chatbot": ["change", "select"], "Checkbox": ["change", "input", "select"], "CheckboxGroup": ["change", "input", "select"], "ClearButton": ["click"], "Code": ["change", "input"], "ColorPicker": ["change", "input", "submit", "focus", "blur"], "Dataframe": ["change", "input", "select"], "Dataset": ["click", "select"], "Dropdown": ["change", "input", "focus", "blur", "select"], "DuplicateButton": ["click"], "File": ["change", "clear", "upload", "select"], "Gallery": ["select"], "HTML": ["change"], "HighlightedText": ["change", "select"], "Image": ["change", "edit", "clear", "stream", "upload", "select"], "Interpretation": [], "JSON": ["change"], "Label": ["change", "select"], "LinePlot": ["change", "clear"], "Markdown": ["change"], "Model3D": ["change", "edit", "clear", "upload"], "Number": ["change", "input", "submit", "focus", "blur"], "Plot": ["change", "clear"], "Radio": ["change", "input", "select"], "ScatterPlot": ["change", "clear"], "Slider": ["change", "input", "release"], "State": [], "Textbox": ["change", "input", "submit", "focus", "blur", "select"], "Timeseries": ["change"], "UploadButton": ["click", "upload"], "Video": ["change", "clear", "play", "pause", "stop", "end", "start_recording", "stop_recording", "upload"]}}, "pages": ["client", "job", "error", "load", "examples", "progress", "update", "make_waveform", "eventdata", "warning", "info", "simplecsvlogger", "csvlogger", "huggingfacedatasetsaver", "request", "mount_gradio_app", "base", "queue", "blocks", "changeable", "inputable", "clickable", "submittable", "editable", "clearable", "playable", "streamable", "recordable", "focusable", "uploadable", "releaseable", "selectable", "row", "column", "tab", "group", "box", "accordion", "annotatedimage", "audio", "barplot", "button", "chatbot", "checkbox", "checkboxgroup", "clearbutton", "code", "colorpicker", "dataframe", "dataset", "dropdown", "duplicatebutton", "file", "gallery", "html", "highlightedtext", "image", "interpretation", "json", "label", "lineplot", "markdown", "model3d", "number", "plot", "radio", "scatterplot", "slider", "state", "textbox", "timeseries", "uploadbutton", "video", "chatinterface", "interface", "tabbedinterface", "parallel", "series"], "js_client": "## JavaScript Client Library\n\nA javascript (and typescript) client to call Gradio APIs.\n\n## Installation\n\nThe Gradio JavaScript client is available on npm as `@gradio/client`. 
You can install it as below:\n\n```sh\nnpm i @gradio/client\n```\n\n## Usage\n\nThe JavaScript Gradio Client exposes two named imports, `client` and `duplicate`.\n\n### `client`\n\nThe client function connects to the API of a hosted Gradio space and returns an object that allows you to make calls to that API.\n\nThe simplest example looks like this:\n\n```ts\nimport { client } from \"@gradio/client\";\n\nconst app = await client(\"user/space-name\");\nconst result = await app.predict(\"/predict\");\n```\n\nThis function accepts two arguments: `source` and `options`:\n\n#### `source`\n\nThis is the url or name of the gradio app whose API you wish to connect to. This parameter is required and should always be a string. For example:\n\n```ts\nclient(\"user/space-name\");\n```\n\n#### `options`\n\nThe options object can optionally be passed a second parameter. This object has two properties, `hf_token` and `status_callback`.\n\n##### `hf_token`\n\nThis should be a Hugging Face personal access token and is required if you wish to make calls to a private gradio api. This option is optional and should be a string starting with `\"hf_\"`.\n\nExample:\n\n```ts\nimport { client } from \"@gradio/client\";\n\nconst app = await client(\"user/space-name\", { hf_token: \"hf_...\" });\n```\n\n##### `status_callback`\n\nThis should be a function which will notify your of the status of a space if it is not running. If the gradio API you are connecting to is awake and running or is not hosted on Hugging Face space then this function will do nothing.\n\n**Additional context**\n\nApplications hosted on Hugging Face spaces can be in a number of different states. As spaces are a GitOps tool and will rebuild when new changes are pushed to the repository, they have various building, running and error states. If a space is not 'running' then the function passed as the `status_callback` will notify you of the current state of the space and the status of the space as it changes. Spaces that are building or sleeping can take longer than usual to respond, so you can use this information to give users feedback about the progress of their action.\n\n```ts\nimport { client, type SpaceStatus } from \"@gradio/client\";\n\nconst app = await client(\"user/space-name\", {\n\t// The space_status parameter does not need to be manually annotated, this is just for illustration.\n\tspace_status: (space_status: SpaceStatus) => console.log(space_status)\n});\n```\n\n```ts\ninterface SpaceStatusNormal {\n\tstatus: \"sleeping\" | \"running\" | \"building\" | \"error\" | \"stopped\";\n\tdetail:\n\t\t| \"SLEEPING\"\n\t\t| \"RUNNING\"\n\t\t| \"RUNNING_BUILDING\"\n\t\t| \"BUILDING\"\n\t\t| \"NOT_FOUND\";\n\tload_status: \"pending\" | \"error\" | \"complete\" | \"generating\";\n\tmessage: string;\n}\n\ninterface SpaceStatusError {\n\tstatus: \"space_error\";\n\tdetail: \"NO_APP_FILE\" | \"CONFIG_ERROR\" | \"BUILD_ERROR\" | \"RUNTIME_ERROR\";\n\tload_status: \"error\";\n\tmessage: string;\n\tdiscussions_enabled: boolean;\n\ntype SpaceStatus = SpaceStatusNormal | SpaceStatusError;\n```\n\nThe gradio client returns an object with a number of methods and properties:\n\n#### `predict`\n\nThe `predict` method allows you to call an api endpoint and get a prediction result:\n\n```ts\nimport { client } from \"@gradio/client\";\n\nconst app = await client(\"user/space-name\");\nconst result = await app.predict(\"/predict\");\n```\n\n`predict` accepts two parameters, `endpoint` and `payload`. 
It returns a promise that resolves to the prediction result.\n\n##### `endpoint`\n\nThis is the endpoint for an api request and is required. The default endpoint for a `gradio.Interface` is `\"/predict\"`. Explicitly named endpoints have a custom name. The endpoint names can be found on the \"View API\" page of a space.\n\n```ts\nimport { client } from \"@gradio/client\";\n\nconst app = await client(\"user/space-name\");\nconst result = await app.predict(\"/predict\");\n```\n\n##### `payload`\n\nThe `payload` argument is generally optional but this depends on the API itself. If the API endpoint depends on values being passed in then it is required for the API request to succeed. The data that should be passed in is detailed on the \"View API\" page of a space, or accessible via the `view_api()` method of the client.\n\n```ts\nimport { client } from \"@gradio/client\";\n\nconst app = await client(\"user/space-name\");\nconst result = await app.predict(\"/predict\", [1, \"Hello\", \"friends\"]);\n```\n\n#### `submit`\n\nThe `submit` method provides a more flexible way to call an API endpoint, providing you with status updates about the current progress of the prediction as well as supporting more complex endpoint types.\n\n```ts\nimport { client } from \"@gradio/client\";\n\nconst app = await client(\"user/space-name\");\nconst submission = app.submit(\"/predict\", payload);\n```\n\nThe `submit` method accepts the same [`endpoint`](#endpoint) and [`payload`](#payload) arguments as `predict`.\n\nThe `submit` method does not return a promise and should not be awaited, instead it returns an object with a `on`, `off`, and `cancel` methods.\n\n##### `on`\n\nThe `on` method allows you to subscribe to events related to the submitted API request. There are two types of event that can be subscribed to: `\"data\"` updates and `\"status\"` updates.\n\n`\"data\"` updates are issued when the API computes a value, the callback provided as the second argument will be called when such a value is sent to the client. The shape of the data depends on the way the API itself is constructed. This event may fire more than once if that endpoint supports emmitting new values over time.\n\n`\"status` updates are issued when the status of a request changes. 
This information allows you to offer feedback to users when the queue position of the request changes, or when the request changes from queued to processing.\n\nThe status payload look like this:\n\n```ts\ninterface Status {\n\tqueue: boolean;\n\tcode?: string;\n\tsuccess?: boolean;\n\tstage: \"pending\" | \"error\" | \"complete\" | \"generating\";\n\tsize?: number;\n\tposition?: number;\n\teta?: number;\n\tmessage?: string;\n\tprogress_data?: Array<{\n\t\tprogress: number | null;\n\t\tindex: number | null;\n\t\tlength: number | null;\n\t\tunit: string | null;\n\t\tdesc: string | null;\n\t}>;\n\ttime?: Date;\n}\n```\n\nUsage of these subscribe callback looks like this:\n\n```ts\nimport { client } from \"@gradio/client\";\n\nconst app = await client(\"user/space-name\");\nconst submission = app\n\t.submit(\"/predict\", payload)\n\t.on(\"data\", (data) => console.log(data))\n\t.on(\"status\", (status: Status) => console.log(status));\n```\n\n##### `off`\n\nThe `off` method unsubscribes from a specific event of the submitted job and works similarly to `document.removeEventListener`; both the event name and the original callback must be passed in to successfully unsubscribe:\n\n```ts\nimport { client } from \"@gradio/client\";\n\nconst app = await client(\"user/space-name\");\nconst handle_data = (data) => console.log(data);\n\nconst submission = app.submit(\"/predict\", payload).on(\"data\", handle_data);\n\n// later\nsubmission.off(\"/predict\", handle_data);\n```\n\n##### `destroy`\n\nThe `destroy` method will remove all subscriptions to a job, regardless of whether or not they are `\"data\"` or `\"status\"` events. This is a convenience method for when you do not want to unsubscribe use the `off` method.\n\n```js\nimport { client } from \"@gradio/client\";\n\nconst app = await client(\"user/space-name\");\nconst handle_data = (data) => console.log(data);\n\nconst submission = app.submit(\"/predict\", payload).on(\"data\", handle_data);\n\n// later\nsubmission.destroy();\n```\n\n##### `cancel`\n\nCertain types of gradio function can run repeatedly and in some cases indefinitely. the `cancel` method will stop such an endpoints and prevent the API from issuing additional updates.\n\n```ts\nimport { client } from \"@gradio/client\";\n\nconst app = await client(\"user/space-name\");\nconst submission = app\n\t.submit(\"/predict\", payload)\n\t.on(\"data\", (data) => console.log(data));\n\n// later\n\nsubmission.cancel();\n```\n\n#### `view_api`\n\nThe `view_api` method provides details about the API you are connected to. It returns a JavaScript object of all named endpoints, unnamed endpoints and what values they accept and return. This method does not accept arguments.\n\n```ts\nimport { client } from \"@gradio/client\";\n\nconst app = await client(\"user/space-name\");\nconst api_info = await app.view_api();\n\nconsole.log(api_info);\n```\n\n#### `config`\n\nThe `config` property contains the configuration for the gradio application you are connected to. This object may contain useful meta information about the application.\n\n```ts\nimport { client } from \"@gradio/client\";\n\nconst app = await client(\"user/space-name\");\nconsole.log(app.config);\n```\n\n### `duplicate`\n\nThe duplicate function will attempt to duplicate the space that is referenced and return an instance of `client` connected to that space. If the space has already been duplicated then it will not create a new duplicate and will instead connect to the existing duplicated space. 
The huggingface token that is passed in will dictate the user under which the space is created.\n\n`duplicate` accepts the same arguments as `client` with the addition of a `private` options property dictating whether the duplicated space should be private or public. A huggingface token is required for duplication to work.\n\n```ts\nimport { duplicate } from \"@gradio/client\";\n\nconst app = await duplicate(\"user/space-name\", {\n\thf_token: \"hf_...\"\n});\n```\n\nThis function accepts two arguments: `source` and `options`:\n\n#### `source`\n\nThe space to duplicate and connect to. [See `client`'s `source` parameter](#source).\n\n#### `options`\n\nAccepts all options that `client` accepts, except `hf_token` is required. [See `client`'s `options` parameter](#source).\n\n`duplicate` also accepts one additional `options` property.\n\n##### `private`\n\nThis is an optional property specific to `duplicate`'s options object and will determine whether the space should be public or private. Spaces duplicated via the `duplicate` method are public by default.\n\n```ts\nimport { duplicate } from \"@gradio/client\";\n\nconst app = await duplicate(\"user/space-name\", {\n\thf_token: \"hf_...\",\n\tprivate: true\n});\n```\n\n##### `timeout`\n\nThis is an optional property specific to `duplicate`'s options object and will set the timeout in minutes before the duplicated space will go to sleep.\n\n```ts\nimport { duplicate } from \"@gradio/client\";\n\nconst app = await duplicate(\"user/space-name\", {\n\thf_token: \"hf_...\",\n\tprivate: true,\n\ttimeout: 5\n});\n```\n\n##### `hardware`\n\nThis is an optional property specific to `duplicate`'s options object and will set the hardware for the duplicated space. By default the hardware used will match that of the original space. If this cannot be obtained it will default to `\"cpu-basic\"`. For hardware upgrades (beyond the basic CPU tier), you may be required to provide [billing information on Hugging Face](https://huggingface.co/settings/billing).\n\nPossible hardware options are:\n\n- `\"cpu-basic\"`\n- `\"cpu-upgrade\"`\n- `\"t4-small\"`\n- `\"t4-medium\"`\n- `\"a10g-small\"`\n- `\"a10g-large\"`\n- `\"a100-large\"`\n\n```ts\nimport { duplicate } from \"@gradio/client\";\n\nconst app = await duplicate(\"user/space-name\", {\n\thf_token: \"hf_...\",\n\tprivate: true,\n\thardware: \"a10g-small\"\n});\n```\n"} \ No newline at end of file diff --git a/js/_website/src/routes/guides/json/creating-a-new-component.json b/js/_website/src/routes/guides/json/creating-a-new-component.json index 1cc7201245af8..ae163407c5961 100644 --- a/js/_website/src/routes/guides/json/creating-a-new-component.json +++ b/js/_website/src/routes/guides/json/creating-a-new-component.json @@ -1 +1 @@ -{"guide": {"name": "creating-a-new-component", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 35, "pretty_name": "Creating A New Component", "content": "# How to Create a New Component\n\n## Introduction\n\nThe purpose of this guide is to illustrate how to add a new component, which you can use in your Gradio applications. 
The guide will be complemented by code snippets showing step by step how the [ColorPicker](https://gradio.app/docs/#colorpicker) component was added.\n\n## Prerequisites\n\nMake sure you have followed the [CONTRIBUTING.md](https://github.com/gradio-app/gradio/blob/main/CONTRIBUTING.md) guide in order to setup your local development environment (both client and server side).\n\nHere's how to create a new component on Gradio:\n\n1. [Create a New Python Class and Import it](#1-create-a-new-python-class-and-import-it)\n2. [Create a New Svelte Component](#2-create-a-new-svelte-component)\n3. [Create a New Demo](#3-create-a-new-demo)\n\n## 1. Create a New Python Class and Import it\n\nThe first thing to do is to create a new class within the [components.py](https://github.com/gradio-app/gradio/blob/main/gradio/components.py) file. This Python class should inherit from a list of base components and should be placed within the file in the correct section with respect to the type of component you want to add (e.g. input, output or static components).\nIn general, it is advisable to take an existing component as a reference (e.g. [TextBox](https://github.com/gradio-app/gradio/blob/main/gradio/components.py#L290)), copy its code as a skeleton and then adapt it to the case at hand.\n\nLet's take a look at the class added to the [components.py](https://github.com/gradio-app/gradio/blob/main/gradio/components.py) file for the ColorPicker component:\n\n```python\n@document()\nclass ColorPicker(Changeable, Submittable, IOComponent):\n \"\"\"\n Creates a color picker for user to select a color as string input.\n Preprocessing: passes selected color value as a {str} into the function.\n Postprocessing: expects a {str} returned from function and sets color picker value to it.\n Examples-format: a {str} with a hexadecimal representation of a color, e.g. \"#ff0000\" for red.\n Demos: color_picker, color_generator\n \"\"\"\n\n def __init__(\n self,\n value: str = None,\n *,\n label: Optional[str] = None,\n show_label: bool = True,\n interactive: Optional[bool] = None,\n visible: bool = True,\n elem_id: Optional[str] = None,\n **kwargs,\n ):\n \"\"\"\n Parameters:\n value: default text to provide in color picker.\n label: component name in interface.\n show_label: if True, will display label.\n interactive: if True, will be rendered as an editable color picker; if False, editing will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.\n visible: If False, component will be hidden.\n elem_id: An optional string that is assigned as the id of this component in the HTML DOM. 
Can be used for targeting CSS styles.\n \"\"\"\n self.value = self.postprocess(value)\n self.cleared_value = \"#000000\"\n self.test_input = value\n IOComponent.__init__(\n self,\n label=label,\n show_label=show_label,\n interactive=interactive,\n visible=visible,\n elem_id=elem_id,\n **kwargs,\n )\n\n def get_config(self):\n return {\n \"value\": self.value,\n **IOComponent.get_config(self),\n }\n\n @staticmethod\n def update(\n value: Optional[Any] = None,\n label: Optional[str] = None,\n show_label: Optional[bool] = None,\n visible: Optional[bool] = None,\n interactive: Optional[bool] = None,\n ):\n return {\n \"value\": value,\n \"label\": label,\n \"show_label\": show_label,\n \"visible\": visible,\n \"interactive\": interactive,\n \"__type__\": \"update\",\n }\n\n # Input Functionalities\n def preprocess(self, x: str | None) -> Any:\n \"\"\"\n Any preprocessing needed to be performed on function input.\n Parameters:\n x (str): text\n Returns:\n (str): text\n \"\"\"\n if x is None:\n return None\n else:\n return str(x)\n\n def preprocess_example(self, x: str | None) -> Any:\n \"\"\"\n Any preprocessing needed to be performed on an example before being passed to the main function.\n \"\"\"\n if x is None:\n return None\n else:\n return str(x)\n\n # Output Functionalities\n def postprocess(self, y: str | None):\n \"\"\"\n Any postprocessing needed to be performed on function output.\n Parameters:\n y (str | None): text\n Returns:\n (str | None): text\n \"\"\"\n if y is None:\n return None\n else:\n return str(y)\n\n def deserialize(self, x):\n \"\"\"\n Convert from serialized output (e.g. base64 representation) from a call() to the interface to a human-readable version of the output (path of an image, etc.)\n \"\"\"\n return x\n```\n\nOnce defined, it is necessary to import the new class inside the [\\_\\_init\\_\\_](https://github.com/gradio-app/gradio/blob/main/gradio/__init__.py) module class in order to make it module visible.\n\n```python\n\nfrom gradio.components import (\n ...\n ColorPicker,\n ...\n)\n\n```\n\n### 1.1 Writing Unit Test for Python Class\n\nWhen developing new components, you should also write a suite of unit tests for it. The tests should be placed in the [gradio/test/test_components.py](https://github.com/gradio-app/gradio/blob/main/test/test_components.py) file. Again, as above, take a cue from the tests of other components (e.g. [Textbox](https://github.com/gradio-app/gradio/blob/main/test/test_components.py)) and add as many unit tests as you think are appropriate to test all the different aspects and functionalities of the new component. 
For example, the following tests were added for the ColorPicker component:\n\n```python\nclass TestColorPicker(unittest.TestCase):\n def test_component_functions(self):\n \"\"\"\n Preprocess, postprocess, serialize, save_flagged, restore_flagged, tokenize, get_config\n \"\"\"\n color_picker_input = gr.ColorPicker()\n self.assertEqual(color_picker_input.preprocess(\"#000000\"), \"#000000\")\n self.assertEqual(color_picker_input.preprocess_example(\"#000000\"), \"#000000\")\n self.assertEqual(color_picker_input.postprocess(None), None)\n self.assertEqual(color_picker_input.postprocess(\"#FFFFFF\"), \"#FFFFFF\")\n self.assertEqual(color_picker_input.serialize(\"#000000\", True), \"#000000\")\n\n color_picker_input.interpretation_replacement = \"unknown\"\n\n self.assertEqual(\n color_picker_input.get_config(),\n {\n \"value\": None,\n \"show_label\": True,\n \"label\": None,\n \"style\": {},\n \"elem_id\": None,\n \"visible\": True,\n \"interactive\": None,\n \"name\": \"colorpicker\",\n },\n )\n\n def test_in_interface_as_input(self):\n \"\"\"\n Interface, process, interpret,\n \"\"\"\n iface = gr.Interface(lambda x: x, \"colorpicker\", \"colorpicker\")\n self.assertEqual(iface.process([\"#000000\"]), [\"#000000\"])\n\n def test_in_interface_as_output(self):\n \"\"\"\n Interface, process\n\n \"\"\"\n iface = gr.Interface(lambda x: x, \"colorpicker\", gr.ColorPicker())\n self.assertEqual(iface.process([\"#000000\"]), [\"#000000\"])\n\n def test_static(self):\n \"\"\"\n postprocess\n \"\"\"\n component = gr.ColorPicker(\"#000000\")\n self.assertEqual(component.get_config().get(\"value\"), \"#000000\")\n```\n\n## 2. Create a New Svelte Component\n\nLet's see the steps you need to follow to create the frontend of your new component and to map it to its python code:\n\n- Create a new UI-side Svelte component and figure out where to place it. The options are: create a package for the new component in the [js folder](https://github.com/gradio-app/gradio/tree/main/js/), if this is completely different from existing components or add the new component to an existing package, such as to the [form package](https://github.com/gradio-app/gradio/tree/main/js/form). The ColorPicker component for example, was included in the form package because it is similar to components that already exist.\n- Create a file with an appropriate name in the src folder of the package where you placed the Svelte component, note: the name must start with a capital letter. This is the 'core' component and it's the generic component that has no knowledge of Gradio specific functionality. Initially add any text/html to this file so that the component renders something. The Svelte application code for the ColorPicker looks like this:\n\n```typescript\n\n\n\n\n```\n\n- Export this file inside the index.ts file of the package where you placed the Svelte component by doing `export { default as FileName } from \"./FileName.svelte\"`. The ColorPicker file is exported in the [index.ts](https://github.com/gradio-app/gradio/blob/main/js/form/src/index.ts) file and the export is performed by doing: `export { default as ColorPicker } from \"./ColorPicker.svelte\";`.\n- Create the Gradio specific component in [js/app/src/components](https://github.com/gradio-app/gradio/tree/main/js/app/src/components). This is a Gradio wrapper that handles the specific logic of the library, passes the necessary data down to the core component and attaches any necessary event listeners. 
Copy the folder of another component, rename it and edit the code inside it, keeping the structure.\n\nHere you will have three files, the first file is for the Svelte application, and it will look like this:\n\n```typescript\n\n\n\n\n\n\t\n\n\t\n\n```\n\nThe second one contains the tests for the frontend, for example for the ColorPicker component:\n\n```typescript\nimport { test, describe, assert, afterEach } from \"vitest\";\nimport { cleanup, render } from \"@gradio/tootils\";\n\nimport ColorPicker from \"./ColorPicker.svelte\";\nimport type { LoadingStatus } from \"../StatusTracker/types\";\n\nconst loading_status = {\n\teta: 0,\n\tqueue_position: 1,\n\tstatus: \"complete\" as LoadingStatus[\"status\"],\n\tscroll_to_output: false,\n\tvisible: true,\n\tfn_index: 0\n};\n\ndescribe(\"ColorPicker\", () => {\n\tafterEach(() => cleanup());\n\n\ttest(\"renders provided value\", () => {\n\t\tconst { getByDisplayValue } = render(ColorPicker, {\n\t\t\tloading_status,\n\t\t\tshow_label: true,\n\t\t\tmode: \"dynamic\",\n\t\t\tvalue: \"#000000\",\n\t\t\tlabel: \"ColorPicker\"\n\t\t});\n\n\t\tconst item: HTMLInputElement = getByDisplayValue(\"#000000\");\n\t\tassert.equal(item.value, \"#000000\");\n\t});\n\n\ttest(\"changing the color should update the value\", async () => {\n\t\tconst { component, getByDisplayValue } = render(ColorPicker, {\n\t\t\tloading_status,\n\t\t\tshow_label: true,\n\t\t\tmode: \"dynamic\",\n\t\t\tvalue: \"#000000\",\n\t\t\tlabel: \"ColorPicker\"\n\t\t});\n\n\t\tconst item: HTMLInputElement = getByDisplayValue(\"#000000\");\n\n\t\tassert.equal(item.value, \"#000000\");\n\n\t\tawait component.$set({\n\t\t\tvalue: \"#FFFFFF\"\n\t\t});\n\n\t\tassert.equal(component.value, \"#FFFFFF\");\n\t});\n});\n```\n\nThe third one is the index.ts file:\n\n```typescript\nexport { default as Component } from \"./ColorPicker.svelte\";\nexport const modes = [\"static\", \"dynamic\"];\n```\n\n- Add the mapping for your component in the [directory.ts file](https://github.com/gradio-app/gradio/blob/main/js/app/src/components/directory.ts). To do this, copy and paste the mapping line of any component and edit its text. The key name must be the lowercase version of the actual component name in the Python library. So for example, for the ColorPicker component the mapping looks like this:\n\n```typescript\nexport const component_map = {\n...\ncolorpicker: () => import(\"./ColorPicker\"),\n...\n}\n```\n\n### 2.1 Writing Unit Test for Svelte Component\n\nWhen developing new components, you should also write a suite of unit tests for it. The tests should be placed in the new component's folder in a file named MyAwesomeComponent.test.ts. Again, as above, take a cue from the tests of other components (e.g. [Textbox.test.ts](https://github.com/gradio-app/gradio/blob/main/js/app/src/components/Textbox/Textbox.test.ts)) and add as many unit tests as you think are appropriate to test all the different aspects and functionalities of the new component.\n\n### 3. Create a New Demo\n\nThe last step is to create a demo in the [gradio/demo folder](https://github.com/gradio-app/gradio/tree/main/demo), which will use the newly added component. Again, the suggestion is to reference an existing demo. Write the code for the demo in a file called run.py, add the necessary requirements and an image showing the application interface. 
Finally add a gif showing its usage.\nYou can take a look at the [demo](https://github.com/gradio-app/gradio/tree/main/demo/color_picker) created for the ColorPicker, where an icon and a color selected through the new component is taken as input, and the same icon colored with the selected color is returned as output.\n\nTo test the application:\n\n- run on a terminal `python path/demo/run.py` which starts the backend at the address [http://localhost:7860](http://localhost:7860);\n- in another terminal, run `pnpm dev` to start the frontend at [http://localhost:9876](http://localhost:9876) with hot reload functionalities.\n\n## Conclusion\n\nIn this guide, we have shown how simple it is to add a new component to Gradio, seeing step by step how the ColorPicker component was added. For further details, you can refer to PR: [#1695](https://github.com/gradio-app/gradio/pull/1695).\n", "html": "

How to Create a New Component

\n\n

Introduction

\n\n

The purpose of this guide is to illustrate how to add a new component, which you can use in your Gradio applications. The guide will be complemented by code snippets showing step by step how the ColorPicker component was added.

\n\n

Prerequisites

\n\n

Make sure you have followed the CONTRIBUTING.md guide in order to set up your local development environment (both client and server side).

\n\n

Here's how to create a new component on Gradio:

\n\n
    \n
  1. Create a New Python Class and Import it
  2. Create a New Svelte Component
  3. Create a New Demo
\n\n

1. Create a New Python Class and Import it

\n\n

The first thing to do is to create a new class within the components.py file. This Python class should inherit from a list of base components and should be placed within the file in the correct section with respect to the type of component you want to add (e.g. input, output or static components).\nIn general, it is advisable to take an existing component as a reference (e.g. TextBox), copy its code as a skeleton and then adapt it to the case at hand.

\n\n

Let's take a look at the class added to the components.py file for the ColorPicker component:

\n\n
@document()\nclass ColorPicker(Changeable, Submittable, IOComponent):\n    \"\"\"\n    Creates a color picker for user to select a color as string input.\n    Preprocessing: passes selected color value as a {str} into the function.\n    Postprocessing: expects a {str} returned from function and sets color picker value to it.\n    Examples-format: a {str} with a hexadecimal representation of a color, e.g. \"#ff0000\" for red.\n    Demos: color_picker, color_generator\n    \"\"\"\n\n    def __init__(\n        self,\n        value: str = None,\n        *,\n        label: Optional[str] = None,\n        show_label: bool = True,\n        interactive: Optional[bool] = None,\n        visible: bool = True,\n        elem_id: Optional[str] = None,\n        **kwargs,\n    ):\n        \"\"\"\n        Parameters:\n            value: default text to provide in color picker.\n            label: component name in interface.\n            show_label: if True, will display label.\n            interactive: if True, will be rendered as an editable color picker; if False, editing will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.\n            visible: If False, component will be hidden.\n            elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.\n        \"\"\"\n        self.value = self.postprocess(value)\n        self.cleared_value = \"#000000\"\n        self.test_input = value\n        IOComponent.__init__(\n            self,\n            label=label,\n            show_label=show_label,\n            interactive=interactive,\n            visible=visible,\n            elem_id=elem_id,\n            **kwargs,\n        )\n\n    def get_config(self):\n        return {\n            \"value\": self.value,\n            **IOComponent.get_config(self),\n        }\n\n    @staticmethod\n    def update(\n        value: Optional[Any] = None,\n        label: Optional[str] = None,\n        show_label: Optional[bool] = None,\n        visible: Optional[bool] = None,\n        interactive: Optional[bool] = None,\n    ):\n        return {\n            \"value\": value,\n            \"label\": label,\n            \"show_label\": show_label,\n            \"visible\": visible,\n            \"interactive\": interactive,\n            \"__type__\": \"update\",\n        }\n\n    # Input Functionalities\n    def preprocess(self, x: str | None) -> Any:\n        \"\"\"\n        Any preprocessing needed to be performed on function input.\n        Parameters:\n        x (str): text\n        Returns:\n        (str): text\n        \"\"\"\n        if x is None:\n            return None\n        else:\n            return str(x)\n\n    def preprocess_example(self, x: str | None) -> Any:\n        \"\"\"\n        Any preprocessing needed to be performed on an example before being passed to the main function.\n        \"\"\"\n        if x is None:\n            return None\n        else:\n            return str(x)\n\n    # Output Functionalities\n    def postprocess(self, y: str | None):\n        \"\"\"\n        Any postprocessing needed to be performed on function output.\n        Parameters:\n        y (str | None): text\n        Returns:\n        (str | None): text\n        \"\"\"\n        if y is None:\n            return None\n        else:\n            return str(y)\n\n    def deserialize(self, x):\n        \"\"\"\n        Convert from serialized output (e.g. 
base64 representation) from a call() to the interface to a human-readable version of the output (path of an image, etc.)\n        \"\"\"\n        return x\n
\n\n

Once defined, it is necessary to import the new class inside the __init__ module in order to make it visible at the module level.

\n\n
\nfrom gradio.components import (\n    ...\n    ColorPicker,\n    ...\n)\n\n
\n\n
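As a quick sanity check (an illustrative snippet, not part of the original guide), the new class can now be instantiated and inspected from Python even before the frontend exists:

```python
import gradio as gr

# The value/label arguments follow the ColorPicker signature shown above.
picker = gr.ColorPicker(value="#ff0000", label="Theme color")
print(picker.get_config()["value"])  # -> "#ff0000"
```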

1.1 Writing Unit Test for Python Class

\n\n

When developing new components, you should also write a suite of unit tests for it. The tests should be placed in the gradio/test/test_components.py file. Again, as above, take a cue from the tests of other components (e.g. Textbox) and add as many unit tests as you think are appropriate to test all the different aspects and functionalities of the new component. For example, the following tests were added for the ColorPicker component:

\n\n
class TestColorPicker(unittest.TestCase):\n    def test_component_functions(self):\n        \"\"\"\n        Preprocess, postprocess, serialize, save_flagged, restore_flagged, tokenize, get_config\n        \"\"\"\n        color_picker_input = gr.ColorPicker()\n        self.assertEqual(color_picker_input.preprocess(\"#000000\"), \"#000000\")\n        self.assertEqual(color_picker_input.preprocess_example(\"#000000\"), \"#000000\")\n        self.assertEqual(color_picker_input.postprocess(None), None)\n        self.assertEqual(color_picker_input.postprocess(\"#FFFFFF\"), \"#FFFFFF\")\n        self.assertEqual(color_picker_input.serialize(\"#000000\", True), \"#000000\")\n\n        color_picker_input.interpretation_replacement = \"unknown\"\n\n        self.assertEqual(\n            color_picker_input.get_config(),\n            {\n                \"value\": None,\n                \"show_label\": True,\n                \"label\": None,\n                \"style\": {},\n                \"elem_id\": None,\n                \"visible\": True,\n                \"interactive\": None,\n                \"name\": \"colorpicker\",\n            },\n        )\n\n    def test_in_interface_as_input(self):\n        \"\"\"\n        Interface, process, interpret,\n        \"\"\"\n        iface = gr.Interface(lambda x: x, \"colorpicker\", \"colorpicker\")\n        self.assertEqual(iface.process([\"#000000\"]), [\"#000000\"])\n\n    def test_in_interface_as_output(self):\n        \"\"\"\n        Interface, process\n\n        \"\"\"\n        iface = gr.Interface(lambda x: x, \"colorpicker\", gr.ColorPicker())\n        self.assertEqual(iface.process([\"#000000\"]), [\"#000000\"])\n\n    def test_static(self):\n        \"\"\"\n        postprocess\n        \"\"\"\n        component = gr.ColorPicker(\"#000000\")\n        self.assertEqual(component.get_config().get(\"value\"), \"#000000\")\n
\n\n

2. Create a New Svelte Component

\n\n

Let's see the steps you need to follow to create the frontend of your new component and to map it to its python code:

\n\n
    \n
  • Create a new UI-side Svelte component and figure out where to place it. The options are: create a package for the new component in the js folder, if it is completely different from existing components, or add the new component to an existing package, such as the form package. The ColorPicker component, for example, was included in the form package because it is similar to components that already exist.
  • \n
  • Create a file with an appropriate name in the src folder of the package where you placed the Svelte component; note that the name must start with a capital letter. This is the 'core' component: the generic component that has no knowledge of Gradio-specific functionality. Initially, add any text/html to this file so that the component renders something. The Svelte application code for the ColorPicker looks like this:
  • \n
\n\n
\n\n\n\n
\n\n
    \n
  • Export this file inside the index.ts file of the package where you placed the Svelte component by doing export { default as FileName } from \"./FileName.svelte\". The ColorPicker file is exported in the index.ts file and the export is performed by doing: export { default as ColorPicker } from \"./ColorPicker.svelte\";.
  • \n
  • Create the Gradio specific component in js/app/src/components. This is a Gradio wrapper that handles the specific logic of the library, passes the necessary data down to the core component and attaches any necessary event listeners. Copy the folder of another component, rename it and edit the code inside it, keeping the structure.
  • \n
\n\n

Here you will have three files, the first file is for the Svelte application, and it will look like this:

\n\n
\n\n\n\n\n    \n\n    \n\n
\n\n

The second one contains the tests for the frontend, for example for the ColorPicker component:

\n\n
import { test, describe, assert, afterEach } from \"vitest\";\nimport { cleanup, render } from \"@gradio/tootils\";\n\nimport ColorPicker from \"./ColorPicker.svelte\";\nimport type { LoadingStatus } from \"../StatusTracker/types\";\n\nconst loading_status = {\n    eta: 0,\n    queue_position: 1,\n    status: \"complete\" as LoadingStatus[\"status\"],\n    scroll_to_output: false,\n    visible: true,\n    fn_index: 0\n};\n\ndescribe(\"ColorPicker\", () => {\n    afterEach(() => cleanup());\n\n    test(\"renders provided value\", () => {\n        const { getByDisplayValue } = render(ColorPicker, {\n            loading_status,\n            show_label: true,\n            mode: \"dynamic\",\n            value: \"#000000\",\n            label: \"ColorPicker\"\n        });\n\n        const item: HTMLInputElement = getByDisplayValue(\"#000000\");\n        assert.equal(item.value, \"#000000\");\n    });\n\n    test(\"changing the color should update the value\", async () => {\n        const { component, getByDisplayValue } = render(ColorPicker, {\n            loading_status,\n            show_label: true,\n            mode: \"dynamic\",\n            value: \"#000000\",\n            label: \"ColorPicker\"\n        });\n\n        const item: HTMLInputElement = getByDisplayValue(\"#000000\");\n\n        assert.equal(item.value, \"#000000\");\n\n        await component.$set({\n            value: \"#FFFFFF\"\n        });\n\n        assert.equal(component.value, \"#FFFFFF\");\n    });\n});\n
\n\n

The third one is the index.ts file:

\n\n
export { default as Component } from \"./ColorPicker.svelte\";\nexport const modes = [\"static\", \"dynamic\"];\n
\n\n
    \n
  • Add the mapping for your component in the directory.ts file. To do this, copy and paste the mapping line of any component and edit its text. The key name must be the lowercase version of the actual component name in the Python library. So for example, for the ColorPicker component the mapping looks like this:
  • \n
\n\n
export const component_map = {\n...\ncolorpicker: () => import(\"./ColorPicker\"),\n...\n}\n
\n\n

2.1 Writing Unit Test for Svelte Component

\n\n

When developing new components, you should also write a suite of unit tests for it. The tests should be placed in the new component's folder in a file named MyAwesomeComponent.test.ts. Again, as above, take a cue from the tests of other components (e.g. Textbox.test.ts) and add as many unit tests as you think are appropriate to test all the different aspects and functionalities of the new component.

\n\n

3. Create a New Demo

\n\n

The last step is to create a demo in the gradio/demo folder, which will use the newly added component. Again, the suggestion is to reference an existing demo. Write the code for the demo in a file called run.py, add the necessary requirements, and an image showing the application interface. Finally, add a gif showing its usage.\nYou can take a look at the demo created for the ColorPicker, where an icon and a color selected through the new component are taken as input, and the same icon colored with the selected color is returned as output.

\n\n
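For illustration, a minimal run.py for such a demo could look like the sketch below; the tint_icon helper and its recoloring logic are simplified assumptions, not the code of the official color_picker demo:

```python
import gradio as gr
import numpy as np
from PIL import Image, ImageColor


def tint_icon(icon: Image.Image, color: str) -> Image.Image:
    # Recolor the icon with the hex color picked in the ColorPicker,
    # keeping the original alpha channel.
    icon = icon.convert("RGBA")
    r, g, b = ImageColor.getrgb(color)
    data = np.array(icon)
    data[..., 0], data[..., 1], data[..., 2] = r, g, b
    return Image.fromarray(data)


demo = gr.Interface(
    fn=tint_icon,
    inputs=[gr.Image(type="pil", label="Icon"), gr.ColorPicker(label="Color")],
    outputs=gr.Image(type="pil", label="Colored icon"),
)

if __name__ == "__main__":
    demo.launch()
```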

To test the application:

\n\n
    \n
  • in a terminal, run python path/demo/run.py, which starts the backend at the address http://localhost:7860;
  • \n
  • in another terminal, run pnpm dev to start the frontend at http://localhost:9876 with hot reload functionality.
  • \n
\n\n

Conclusion

\n\n

In this guide, we have shown how simple it is to add a new component to Gradio, seeing step by step how the ColorPicker component was added. For further details, you can refer to PR: #1695.

\n", "tags": [], "spaces": [], "url": "/guides/creating-a-new-component/", "contributor": null}} +{"guide": {"name": "creating-a-new-component", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 35, "pretty_name": "Creating A New Component", "content": "# How to Create a New Component\n\n## Introduction\n\nThe purpose of this guide is to illustrate how to add a new component, which you can use in your Gradio applications. The guide will be complemented by code snippets showing step by step how the [ColorPicker](https://gradio.app/docs/#colorpicker) component was added.\n\n## Prerequisites\n\nMake sure you have followed the [CONTRIBUTING.md](https://github.com/gradio-app/gradio/blob/main/CONTRIBUTING.md) guide in order to setup your local development environment (both client and server side).\n\nHere's how to create a new component on Gradio:\n\n1. [Create a New Python Class and Import it](#1-create-a-new-python-class-and-import-it)\n2. [Create a New Svelte Component](#2-create-a-new-svelte-component)\n3. [Create a New Demo](#3-create-a-new-demo)\n\n## 1. Create a New Python Class and Import it\n\nThe first thing to do is to create a new class within the [components.py](https://github.com/gradio-app/gradio/blob/main/gradio/components.py) file. This Python class should inherit from a list of base components and should be placed within the file in the correct section with respect to the type of component you want to add (e.g. input, output or static components).\nIn general, it is advisable to take an existing component as a reference (e.g. [TextBox](https://github.com/gradio-app/gradio/blob/main/gradio/components.py#L290)), copy its code as a skeleton and then adapt it to the case at hand.\n\nLet's take a look at the class added to the [components.py](https://github.com/gradio-app/gradio/blob/main/gradio/components.py) file for the ColorPicker component:\n\n```python\n@document()\nclass ColorPicker(Changeable, Submittable, IOComponent):\n \"\"\"\n Creates a color picker for user to select a color as string input.\n Preprocessing: passes selected color value as a {str} into the function.\n Postprocessing: expects a {str} returned from function and sets color picker value to it.\n Examples-format: a {str} with a hexadecimal representation of a color, e.g. \"#ff0000\" for red.\n Demos: color_picker, color_generator\n \"\"\"\n\n def __init__(\n self,\n value: str = None,\n *,\n label: Optional[str] = None,\n show_label: bool = True,\n interactive: Optional[bool] = None,\n visible: bool = True,\n elem_id: Optional[str] = None,\n **kwargs,\n ):\n \"\"\"\n Parameters:\n value: default text to provide in color picker.\n label: component name in interface.\n show_label: if True, will display label.\n interactive: if True, will be rendered as an editable color picker; if False, editing will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.\n visible: If False, component will be hidden.\n elem_id: An optional string that is assigned as the id of this component in the HTML DOM. 
Can be used for targeting CSS styles.\n \"\"\"\n self.value = self.postprocess(value)\n self.cleared_value = \"#000000\"\n self.test_input = value\n IOComponent.__init__(\n self,\n label=label,\n show_label=show_label,\n interactive=interactive,\n visible=visible,\n elem_id=elem_id,\n **kwargs,\n )\n\n def get_config(self):\n return {\n \"value\": self.value,\n **IOComponent.get_config(self),\n }\n\n @staticmethod\n def update(\n value: Optional[Any] = None,\n label: Optional[str] = None,\n show_label: Optional[bool] = None,\n visible: Optional[bool] = None,\n interactive: Optional[bool] = None,\n ):\n return {\n \"value\": value,\n \"label\": label,\n \"show_label\": show_label,\n \"visible\": visible,\n \"interactive\": interactive,\n \"__type__\": \"update\",\n }\n\n # Input Functionalities\n def preprocess(self, x: str | None) -> Any:\n \"\"\"\n Any preprocessing needed to be performed on function input.\n Parameters:\n x (str): text\n Returns:\n (str): text\n \"\"\"\n if x is None:\n return None\n else:\n return str(x)\n\n def preprocess_example(self, x: str | None) -> Any:\n \"\"\"\n Any preprocessing needed to be performed on an example before being passed to the main function.\n \"\"\"\n if x is None:\n return None\n else:\n return str(x)\n\n # Output Functionalities\n def postprocess(self, y: str | None):\n \"\"\"\n Any postprocessing needed to be performed on function output.\n Parameters:\n y (str | None): text\n Returns:\n (str | None): text\n \"\"\"\n if y is None:\n return None\n else:\n return str(y)\n\n def deserialize(self, x):\n \"\"\"\n Convert from serialized output (e.g. base64 representation) from a call() to the interface to a human-readable version of the output (path of an image, etc.)\n \"\"\"\n return x\n```\n\nOnce defined, it is necessary to import the new class inside the [\\_\\_init\\_\\_](https://github.com/gradio-app/gradio/blob/main/gradio/__init__.py) module class in order to make it module visible.\n\n```python\n\nfrom gradio.components import (\n ...\n ColorPicker,\n ...\n)\n\n```\n\n### 1.1 Writing Unit Test for Python Class\n\nWhen developing new components, you should also write a suite of unit tests for it. The tests should be placed in the [gradio/test/test_components.py](https://github.com/gradio-app/gradio/blob/main/test/test_components.py) file. Again, as above, take a cue from the tests of other components (e.g. [Textbox](https://github.com/gradio-app/gradio/blob/main/test/test_components.py)) and add as many unit tests as you think are appropriate to test all the different aspects and functionalities of the new component. 
For example, the following tests were added for the ColorPicker component:\n\n```python\nclass TestColorPicker(unittest.TestCase):\n def test_component_functions(self):\n \"\"\"\n Preprocess, postprocess, serialize, save_flagged, restore_flagged, tokenize, get_config\n \"\"\"\n color_picker_input = gr.ColorPicker()\n self.assertEqual(color_picker_input.preprocess(\"#000000\"), \"#000000\")\n self.assertEqual(color_picker_input.preprocess_example(\"#000000\"), \"#000000\")\n self.assertEqual(color_picker_input.postprocess(None), None)\n self.assertEqual(color_picker_input.postprocess(\"#FFFFFF\"), \"#FFFFFF\")\n self.assertEqual(color_picker_input.serialize(\"#000000\", True), \"#000000\")\n\n color_picker_input.interpretation_replacement = \"unknown\"\n\n self.assertEqual(\n color_picker_input.get_config(),\n {\n \"value\": None,\n \"show_label\": True,\n \"label\": None,\n \"style\": {},\n \"elem_id\": None,\n \"visible\": True,\n \"interactive\": None,\n \"name\": \"colorpicker\",\n },\n )\n\n def test_in_interface_as_input(self):\n \"\"\"\n Interface, process, interpret,\n \"\"\"\n iface = gr.Interface(lambda x: x, \"colorpicker\", \"colorpicker\")\n self.assertEqual(iface.process([\"#000000\"]), [\"#000000\"])\n\n def test_in_interface_as_output(self):\n \"\"\"\n Interface, process\n\n \"\"\"\n iface = gr.Interface(lambda x: x, \"colorpicker\", gr.ColorPicker())\n self.assertEqual(iface.process([\"#000000\"]), [\"#000000\"])\n\n def test_static(self):\n \"\"\"\n postprocess\n \"\"\"\n component = gr.ColorPicker(\"#000000\")\n self.assertEqual(component.get_config().get(\"value\"), \"#000000\")\n```\n\n## 2. Create a New Svelte Component\n\nLet's see the steps you need to follow to create the frontend of your new component and to map it to its python code:\n\n- Create a new UI-side Svelte component and figure out where to place it. The options are: create a package for the new component in the [js folder](https://github.com/gradio-app/gradio/tree/main/js/), if this is completely different from existing components or add the new component to an existing package, such as to the [form package](https://github.com/gradio-app/gradio/tree/main/js/form). The ColorPicker component for example, was included in the form package because it is similar to components that already exist.\n- Create a file with an appropriate name in the src folder of the package where you placed the Svelte component, note: the name must start with a capital letter. This is the 'core' component and it's the generic component that has no knowledge of Gradio specific functionality. Initially add any text/html to this file so that the component renders something. The Svelte application code for the ColorPicker looks like this:\n\n```typescript\n\n\n\n\n```\n\n- Export this file inside the index.ts file of the package where you placed the Svelte component by doing `export { default as FileName } from \"./FileName.svelte\"`. The ColorPicker file is exported in the [index.ts](https://github.com/gradio-app/gradio/blob/main/js/form/src/index.ts) file and the export is performed by doing: `export { default as ColorPicker } from \"./ColorPicker.svelte\";`.\n- Create the Gradio specific component in [js/app/src/components](https://github.com/gradio-app/gradio/tree/main/js/app/src/components). This is a Gradio wrapper that handles the specific logic of the library, passes the necessary data down to the core component and attaches any necessary event listeners. 
Copy the folder of another component, rename it and edit the code inside it, keeping the structure.\n\nHere you will have three files, the first file is for the Svelte application, and it will look like this:\n\n```typescript\n\n\n\n\n\n\t\n\n\t\n\n```\n\nThe second one contains the tests for the frontend, for example for the ColorPicker component:\n\n```typescript\nimport { test, describe, assert, afterEach } from \"vitest\";\nimport { cleanup, render } from \"@gradio/tootils\";\n\nimport ColorPicker from \"./ColorPicker.svelte\";\nimport type { LoadingStatus } from \"@gradio/statustracker/types\";\n\nconst loading_status = {\n\teta: 0,\n\tqueue_position: 1,\n\tstatus: \"complete\" as LoadingStatus[\"status\"],\n\tscroll_to_output: false,\n\tvisible: true,\n\tfn_index: 0\n};\n\ndescribe(\"ColorPicker\", () => {\n\tafterEach(() => cleanup());\n\n\ttest(\"renders provided value\", () => {\n\t\tconst { getByDisplayValue } = render(ColorPicker, {\n\t\t\tloading_status,\n\t\t\tshow_label: true,\n\t\t\tmode: \"dynamic\",\n\t\t\tvalue: \"#000000\",\n\t\t\tlabel: \"ColorPicker\"\n\t\t});\n\n\t\tconst item: HTMLInputElement = getByDisplayValue(\"#000000\");\n\t\tassert.equal(item.value, \"#000000\");\n\t});\n\n\ttest(\"changing the color should update the value\", async () => {\n\t\tconst { component, getByDisplayValue } = render(ColorPicker, {\n\t\t\tloading_status,\n\t\t\tshow_label: true,\n\t\t\tmode: \"dynamic\",\n\t\t\tvalue: \"#000000\",\n\t\t\tlabel: \"ColorPicker\"\n\t\t});\n\n\t\tconst item: HTMLInputElement = getByDisplayValue(\"#000000\");\n\n\t\tassert.equal(item.value, \"#000000\");\n\n\t\tawait component.$set({\n\t\t\tvalue: \"#FFFFFF\"\n\t\t});\n\n\t\tassert.equal(component.value, \"#FFFFFF\");\n\t});\n});\n```\n\nThe third one is the index.ts file:\n\n```typescript\nexport { default as Component } from \"./ColorPicker.svelte\";\nexport const modes = [\"static\", \"dynamic\"];\n```\n\n- Add the mapping for your component in the [directory.ts file](https://github.com/gradio-app/gradio/blob/main/js/app/src/components/directory.ts). To do this, copy and paste the mapping line of any component and edit its text. The key name must be the lowercase version of the actual component name in the Python library. So for example, for the ColorPicker component the mapping looks like this:\n\n```typescript\nexport const component_map = {\n...\ncolorpicker: () => import(\"./ColorPicker\"),\n...\n}\n```\n\n### 2.1 Writing Unit Test for Svelte Component\n\nWhen developing new components, you should also write a suite of unit tests for it. The tests should be placed in the new component's folder in a file named MyAwesomeComponent.test.ts. Again, as above, take a cue from the tests of other components (e.g. [Textbox.test.ts](https://github.com/gradio-app/gradio/blob/main/js/app/src/components/Textbox/Textbox.test.ts)) and add as many unit tests as you think are appropriate to test all the different aspects and functionalities of the new component.\n\n### 3. Create a New Demo\n\nThe last step is to create a demo in the [gradio/demo folder](https://github.com/gradio-app/gradio/tree/main/demo), which will use the newly added component. Again, the suggestion is to reference an existing demo. Write the code for the demo in a file called run.py, add the necessary requirements and an image showing the application interface. 
Finally add a gif showing its usage.\nYou can take a look at the [demo](https://github.com/gradio-app/gradio/tree/main/demo/color_picker) created for the ColorPicker, where an icon and a color selected through the new component is taken as input, and the same icon colored with the selected color is returned as output.\n\nTo test the application:\n\n- run on a terminal `python path/demo/run.py` which starts the backend at the address [http://localhost:7860](http://localhost:7860);\n- in another terminal, run `pnpm dev` to start the frontend at [http://localhost:9876](http://localhost:9876) with hot reload functionalities.\n\n## Conclusion\n\nIn this guide, we have shown how simple it is to add a new component to Gradio, seeing step by step how the ColorPicker component was added. For further details, you can refer to PR: [#1695](https://github.com/gradio-app/gradio/pull/1695).\n", "html": "

How to Create a New Component

\n\n

Introduction

\n\n

The purpose of this guide is to illustrate how to add a new component, which you can use in your Gradio applications. The guide will be complemented by code snippets showing step by step how the ColorPicker component was added.

\n\n

Prerequisites

\n\n

Make sure you have followed the CONTRIBUTING.md guide in order to set up your local development environment (both client and server side).

\n\n

Here's how to create a new component on Gradio:

\n\n
    \n
  1. Create a New Python Class and Import it
  2. \n
  3. Create a New Svelte Component
  4. \n
  5. Create a New Demo
  6. \n
\n\n

1. Create a New Python Class and Import it

\n\n

The first thing to do is to create a new class within the components.py file. This Python class should inherit from a list of base components and should be placed within the file in the correct section with respect to the type of component you want to add (e.g. input, output or static components).\nIn general, it is advisable to take an existing component as a reference (e.g. TextBox), copy its code as a skeleton and then adapt it to the case at hand.

\n\n

Let's take a look at the class added to the components.py file for the ColorPicker component:

\n\n
@document()\nclass ColorPicker(Changeable, Submittable, IOComponent):\n    \"\"\"\n    Creates a color picker for user to select a color as string input.\n    Preprocessing: passes selected color value as a {str} into the function.\n    Postprocessing: expects a {str} returned from function and sets color picker value to it.\n    Examples-format: a {str} with a hexadecimal representation of a color, e.g. \"#ff0000\" for red.\n    Demos: color_picker, color_generator\n    \"\"\"\n\n    def __init__(\n        self,\n        value: str = None,\n        *,\n        label: Optional[str] = None,\n        show_label: bool = True,\n        interactive: Optional[bool] = None,\n        visible: bool = True,\n        elem_id: Optional[str] = None,\n        **kwargs,\n    ):\n        \"\"\"\n        Parameters:\n            value: default text to provide in color picker.\n            label: component name in interface.\n            show_label: if True, will display label.\n            interactive: if True, will be rendered as an editable color picker; if False, editing will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.\n            visible: If False, component will be hidden.\n            elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.\n        \"\"\"\n        self.value = self.postprocess(value)\n        self.cleared_value = \"#000000\"\n        self.test_input = value\n        IOComponent.__init__(\n            self,\n            label=label,\n            show_label=show_label,\n            interactive=interactive,\n            visible=visible,\n            elem_id=elem_id,\n            **kwargs,\n        )\n\n    def get_config(self):\n        return {\n            \"value\": self.value,\n            **IOComponent.get_config(self),\n        }\n\n    @staticmethod\n    def update(\n        value: Optional[Any] = None,\n        label: Optional[str] = None,\n        show_label: Optional[bool] = None,\n        visible: Optional[bool] = None,\n        interactive: Optional[bool] = None,\n    ):\n        return {\n            \"value\": value,\n            \"label\": label,\n            \"show_label\": show_label,\n            \"visible\": visible,\n            \"interactive\": interactive,\n            \"__type__\": \"update\",\n        }\n\n    # Input Functionalities\n    def preprocess(self, x: str | None) -> Any:\n        \"\"\"\n        Any preprocessing needed to be performed on function input.\n        Parameters:\n        x (str): text\n        Returns:\n        (str): text\n        \"\"\"\n        if x is None:\n            return None\n        else:\n            return str(x)\n\n    def preprocess_example(self, x: str | None) -> Any:\n        \"\"\"\n        Any preprocessing needed to be performed on an example before being passed to the main function.\n        \"\"\"\n        if x is None:\n            return None\n        else:\n            return str(x)\n\n    # Output Functionalities\n    def postprocess(self, y: str | None):\n        \"\"\"\n        Any postprocessing needed to be performed on function output.\n        Parameters:\n        y (str | None): text\n        Returns:\n        (str | None): text\n        \"\"\"\n        if y is None:\n            return None\n        else:\n            return str(y)\n\n    def deserialize(self, x):\n        \"\"\"\n        Convert from serialized output (e.g. 
base64 representation) from a call() to the interface to a human-readable version of the output (path of an image, etc.)\n        \"\"\"\n        return x\n
\n\n

Once defined, you need to import the new class inside the __init__ module so that it is visible at the module level.

\n\n
\nfrom gradio.components import (\n    ...\n    ColorPicker,\n    ...\n)\n\n
\n\n

1.1 Writing Unit Test for Python Class

\n\n

When developing a new component, you should also write a suite of unit tests for it. The tests should be placed in the gradio/test/test_components.py file. Again, as above, take a cue from the tests of other components (e.g. Textbox) and add as many unit tests as you think are appropriate to test all the different aspects and functionalities of the new component. For example, the following tests were added for the ColorPicker component:

\n\n
class TestColorPicker(unittest.TestCase):\n    def test_component_functions(self):\n        \"\"\"\n        Preprocess, postprocess, serialize, save_flagged, restore_flagged, tokenize, get_config\n        \"\"\"\n        color_picker_input = gr.ColorPicker()\n        self.assertEqual(color_picker_input.preprocess(\"#000000\"), \"#000000\")\n        self.assertEqual(color_picker_input.preprocess_example(\"#000000\"), \"#000000\")\n        self.assertEqual(color_picker_input.postprocess(None), None)\n        self.assertEqual(color_picker_input.postprocess(\"#FFFFFF\"), \"#FFFFFF\")\n        self.assertEqual(color_picker_input.serialize(\"#000000\", True), \"#000000\")\n\n        color_picker_input.interpretation_replacement = \"unknown\"\n\n        self.assertEqual(\n            color_picker_input.get_config(),\n            {\n                \"value\": None,\n                \"show_label\": True,\n                \"label\": None,\n                \"style\": {},\n                \"elem_id\": None,\n                \"visible\": True,\n                \"interactive\": None,\n                \"name\": \"colorpicker\",\n            },\n        )\n\n    def test_in_interface_as_input(self):\n        \"\"\"\n        Interface, process, interpret,\n        \"\"\"\n        iface = gr.Interface(lambda x: x, \"colorpicker\", \"colorpicker\")\n        self.assertEqual(iface.process([\"#000000\"]), [\"#000000\"])\n\n    def test_in_interface_as_output(self):\n        \"\"\"\n        Interface, process\n\n        \"\"\"\n        iface = gr.Interface(lambda x: x, \"colorpicker\", gr.ColorPicker())\n        self.assertEqual(iface.process([\"#000000\"]), [\"#000000\"])\n\n    def test_static(self):\n        \"\"\"\n        postprocess\n        \"\"\"\n        component = gr.ColorPicker(\"#000000\")\n        self.assertEqual(component.get_config().get(\"value\"), \"#000000\")\n
\n\n

2. Create a New Svelte Component

\n\n

Let's see the steps you need to follow to create the frontend of your new component and to map it to its python code:

\n\n
    \n
  • Create a new UI-side Svelte component and figure out where to place it. The options are: create a package for the new component in the js folder if it is completely different from existing components, or add the new component to an existing package, such as the form package. The ColorPicker component, for example, was included in the form package because it is similar to components that already exist.
  • \n
  • Create a file with an appropriate name in the src folder of the package where you placed the Svelte component; note that the name must start with a capital letter. This is the 'core' component: the generic component that has no knowledge of Gradio-specific functionality. Initially, add some text/HTML to this file so that the component renders something. The Svelte application code for the ColorPicker looks like this:
  • \n
\n\n
\n\n\n\n
\n\n
    \n
  • Export this file inside the index.ts file of the package where you placed the Svelte component by doing export { default as FileName } from \"./FileName.svelte\". The ColorPicker file is exported in the index.ts file and the export is performed by doing: export { default as ColorPicker } from \"./ColorPicker.svelte\";.
  • \n
  • Create the Gradio specific component in js/app/src/components. This is a Gradio wrapper that handles the specific logic of the library, passes the necessary data down to the core component and attaches any necessary event listeners. Copy the folder of another component, rename it and edit the code inside it, keeping the structure.
  • \n
\n\n

Here you will have three files. The first is for the Svelte application, and it will look like this:

\n\n
\n\n\n\n\n    \n\n    \n\n
\n\n

The second one contains the tests for the frontend, for example for the ColorPicker component:

\n\n
import { test, describe, assert, afterEach } from \"vitest\";\nimport { cleanup, render } from \"@gradio/tootils\";\n\nimport ColorPicker from \"./ColorPicker.svelte\";\nimport type { LoadingStatus } from \"@gradio/statustracker/types\";\n\nconst loading_status = {\n    eta: 0,\n    queue_position: 1,\n    status: \"complete\" as LoadingStatus[\"status\"],\n    scroll_to_output: false,\n    visible: true,\n    fn_index: 0\n};\n\ndescribe(\"ColorPicker\", () => {\n    afterEach(() => cleanup());\n\n    test(\"renders provided value\", () => {\n        const { getByDisplayValue } = render(ColorPicker, {\n            loading_status,\n            show_label: true,\n            mode: \"dynamic\",\n            value: \"#000000\",\n            label: \"ColorPicker\"\n        });\n\n        const item: HTMLInputElement = getByDisplayValue(\"#000000\");\n        assert.equal(item.value, \"#000000\");\n    });\n\n    test(\"changing the color should update the value\", async () => {\n        const { component, getByDisplayValue } = render(ColorPicker, {\n            loading_status,\n            show_label: true,\n            mode: \"dynamic\",\n            value: \"#000000\",\n            label: \"ColorPicker\"\n        });\n\n        const item: HTMLInputElement = getByDisplayValue(\"#000000\");\n\n        assert.equal(item.value, \"#000000\");\n\n        await component.$set({\n            value: \"#FFFFFF\"\n        });\n\n        assert.equal(component.value, \"#FFFFFF\");\n    });\n});\n
\n\n

The third one is the index.ts file:

\n\n
export { default as Component } from \"./ColorPicker.svelte\";\nexport const modes = [\"static\", \"dynamic\"];\n
\n\n
    \n
  • Add the mapping for your component in the directory.ts file. To do this, copy and paste the mapping line of any component and edit its text. The key name must be the lowercase version of the actual component name in the Python library. So for example, for the ColorPicker component the mapping looks like this:
  • \n
\n\n
export const component_map = {\n...\ncolorpicker: () => import(\"./ColorPicker\"),\n...\n}\n
\n\n

2.1 Writing Unit Test for Svelte Component

\n\n

When developing a new component, you should also write a suite of unit tests for it. The tests should be placed in the new component's folder in a file named MyAwesomeComponent.test.ts. Again, as above, take a cue from the tests of other components (e.g. Textbox.test.ts) and add as many unit tests as you think are appropriate to test all the different aspects and functionalities of the new component.

\n\n

3. Create a New Demo

\n\n

The last step is to create a demo in the gradio/demo folder, which will use the newly added component. Again, the suggestion is to reference an existing demo. Write the code for the demo in a file called run.py, add the necessary requirements and an image showing the application interface. Finally, add a gif showing its usage.\nYou can take a look at the demo created for the ColorPicker, where an icon and a color selected through the new component are taken as input, and the same icon colored with the selected color is returned as output.

\n\n
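To make this concrete, here is a minimal sketch of what such a run.py could look like. The recoloring logic below is purely illustrative and is not the actual code of the demo/color_picker demo; it simply shows gr.ColorPicker being used alongside an image input.

```python
import gradio as gr
import numpy as np
from PIL import Image, ImageColor

def recolor_icon(icon, color):
    # Convert the selected hex color (e.g. "#ff0000") into an RGB tuple.
    rgb = ImageColor.getcolor(color, "RGB")
    # Split the icon into a grayscale channel and an alpha channel.
    gray, alpha = icon.convert("LA").split()
    # Tint the grayscale icon with the selected color, keeping transparency.
    tinted = np.stack([np.array(gray) / 255 * c for c in rgb], axis=-1).astype("uint8")
    result = Image.fromarray(tinted).convert("RGBA")
    result.putalpha(alpha)
    return result

demo = gr.Interface(
    recolor_icon,
    inputs=[gr.Image(type="pil"), gr.ColorPicker()],
    outputs=gr.Image(type="pil"),
)

if __name__ == "__main__":
    demo.launch()
```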

To test the application:

\n\n
    \n
  • run python path/demo/run.py in a terminal, which starts the backend at http://localhost:7860;
  • \n
  • in another terminal, run pnpm dev to start the frontend at http://localhost:9876 with hot reload functionality.
  • \n
\n\n

Conclusion

\n\n

In this guide, we have shown how simple it is to add a new component to Gradio, seeing step by step how the ColorPicker component was added. For further details, you can refer to PR: #1695.

\n", "tags": [], "spaces": [], "url": "/guides/creating-a-new-component/", "contributor": null}} \ No newline at end of file diff --git a/js/_website/src/routes/guides/json/guides_by_category.json b/js/_website/src/routes/guides/json/guides_by_category.json index ab7e3fe2a3006..7c77a915ba6ea 100644 --- a/js/_website/src/routes/guides/json/guides_by_category.json +++ b/js/_website/src/routes/guides/json/guides_by_category.json @@ -1 +1 @@ -{"guides_by_category": [{"category": "Getting Started", "guides": [{"name": "quickstart", "category": "getting-started", "pretty_category": "Getting Started", "guide_index": 1, "absolute_index": 0, "pretty_name": "Quickstart", "content": "# Quickstart\n\n**Prerequisite**: Gradio requires Python 3.8 or higher, that's all!\n\n## What Does Gradio Do?\n\nOne of the *best ways to share* your machine learning model, API, or data science workflow with others is to create an **interactive app** that allows your users or colleagues to try out the demo in their browsers.\n\nGradio allows you to **build demos and share them, all in Python.** And usually in just a few lines of code! So let's get started.\n\n## Hello, World\n\nTo get Gradio running with a simple \"Hello, World\" example, follow these three steps:\n\n1\\. Install Gradio using pip:\n\n```bash\npip install gradio\n```\n\n2\\. Run the code below as a Python script or in a Jupyter Notebook (or [Google Colab](https://colab.research.google.com/drive/18ODkJvyxHutTN0P5APWyGFO_xwNcgHDZ?usp=sharing)):\n\n```python\nimport gradio as gr\n\ndef greet(name):\n return \"Hello \" + name + \"!\"\n\ndemo = gr.Interface(fn=greet, inputs=\"text\", outputs=\"text\")\n \ndemo.launch() \n```\n\nWe shorten the imported name to `gr` for better readability of code using Gradio. This is a widely adopted convention that you should follow so that anyone working with your code can easily understand it.\n\n3\\. The demo below will appear automatically within the Jupyter Notebook, or pop in a browser on [http://localhost:7860](http://localhost:7860) if running from a script:\n\n\n\nWhen developing locally, if you want to run the code as a Python script, you can use the Gradio CLI to launch the application **in reload mode**, which will provide seamless and fast development. Learn more about reloading in the [Auto-Reloading Guide](https://gradio.app/developing-faster-with-reload-mode/).\n\n```bash\ngradio app.py\n```\n\nNote: you can also do `python app.py`, but it won't provide the automatic reload mechanism.\n\n## The `Interface` Class\n\nYou'll notice that in order to make the demo, we created a `gr.Interface`. This `Interface` class can wrap any Python function with a user interface. In the example above, we saw a simple text-based function, but the function could be anything from music generator to a tax calculator to the prediction function of a pretrained machine learning model.\n\nThe core `Interface` class is initialized with three required parameters:\n\n- `fn`: the function to wrap a UI around\n- `inputs`: which component(s) to use for the input (e.g. `\"text\"`, `\"image\"` or `\"audio\"`)\n- `outputs`: which component(s) to use for the output (e.g. 
`\"text\"`, `\"image\"` or `\"label\"`)\n\nLet's take a closer look at these components used to provide input and output.\n\n## Components Attributes\n\nWe saw some simple `Textbox` components in the previous examples, but what if you want to change how the UI components look or behave?\n\nLet's say you want to customize the input text field \u2014 for example, you wanted it to be larger and have a text placeholder. If we use the actual class for `Textbox` instead of using the string shortcut, you have access to much more customizability through component attributes.\n\n```python\nimport gradio as gr\n\ndef greet(name):\n return \"Hello \" + name + \"!\"\n\ndemo = gr.Interface(\n fn=greet,\n inputs=gr.Textbox(lines=2, placeholder=\"Name Here...\"),\n outputs=\"text\",\n)\ndemo.launch()\n\n```\n\n\n## Multiple Input and Output Components\n\nSuppose you had a more complex function, with multiple inputs and outputs. In the example below, we define a function that takes a string, boolean, and number, and returns a string and number. Take a look how you pass a list of input and output components.\n\n```python\nimport gradio as gr\n\ndef greet(name, is_morning, temperature):\n salutation = \"Good morning\" if is_morning else \"Good evening\"\n greeting = f\"{salutation} {name}. It is {temperature} degrees today\"\n celsius = (temperature - 32) * 5 / 9\n return greeting, round(celsius, 2)\n\ndemo = gr.Interface(\n fn=greet,\n inputs=[\"text\", \"checkbox\", gr.Slider(0, 100)],\n outputs=[\"text\", \"number\"],\n)\ndemo.launch()\n\n```\n\n\nYou simply wrap the components in a list. Each component in the `inputs` list corresponds to one of the parameters of the function, in order. Each component in the `outputs` list corresponds to one of the values returned by the function, again in order.\n\n## An Image Example\n\nGradio supports many types of components, such as `Image`, `DataFrame`, `Video`, or `Label`. Let's try an image-to-image function to get a feel for these!\n\n```python\nimport numpy as np\nimport gradio as gr\n\ndef sepia(input_img):\n sepia_filter = np.array([\n [0.393, 0.769, 0.189], \n [0.349, 0.686, 0.168], \n [0.272, 0.534, 0.131]\n ])\n sepia_img = input_img.dot(sepia_filter.T)\n sepia_img /= sepia_img.max()\n return sepia_img\n\ndemo = gr.Interface(sepia, gr.Image(shape=(200, 200)), \"image\")\ndemo.launch()\n\n```\n\n\nWhen using the `Image` component as input, your function will receive a NumPy array with the shape `(height, width, 3)`, where the last dimension represents the RGB values. We'll return an image as well in the form of a NumPy array.\n\nYou can also set the datatype used by the component with the `type=` keyword argument. For example, if you wanted your function to take a file path to an image instead of a NumPy array, the input `Image` component could be written as:\n\n```python\ngr.Image(type=\"filepath\", shape=...)\n```\n\nAlso note that our input `Image` component comes with an edit button \ud83d\udd89, which allows for cropping and zooming into images. Manipulating images in this way can help reveal biases or hidden flaws in a machine learning model!\n\nYou can read more about the many components and how to use them in the [Gradio docs](https://gradio.app/docs).\n\n## Chatbots\n\nGradio includes a high-level class, `gr.ChatInterface`, which is similar to `gr.Interface`, but is specifically designed for chatbot UIs. The `gr.ChatInterface` class also wraps a function but this function must have a specific signature. 
The function should take two arguments: `message` and then `history` (the arguments can be named anything, but must be in this order)\n\n* `message`: a `str` representing the user's input\n* `history`: a `list` of `list` representing the conversations up until that point. Each inner list consists of two `str` representing a pair: `[user input, bot response]`. \n\nYour function should return a single string response, which is the bot's response to the particular user input `message`.\n\nOther than that, `gr.ChatInterface` has no required parameters (though several are available for customization of the UI).\n\nHere's a toy example:\n\n```python\nimport random\nimport gradio as gr\n\ndef random_response(message, history):\n return random.choice([\"Yes\", \"No\"])\n\ndemo = gr.ChatInterface(random_response)\n\ndemo.launch()\n\n```\n\n\nYou can [read more about `gr.ChatInterface` here](https://gradio.app/guides/creating-a-chatbot-fast).\n\n## Blocks: More Flexibility and Control\n\nGradio offers two approaches to build apps:\n\n1\\. **Interface** and **ChatInterface**, which provide a high-level abstraction for creating demos that we've been discussing so far.\n\n2\\. **Blocks**, a low-level API for designing web apps with more flexible layouts and data flows. Blocks allows you to do things like feature multiple data flows and demos, control where components appear on the page, handle complex data flows (e.g. outputs can serve as inputs to other functions), and update properties/visibility of components based on user interaction \u2014 still all in Python. If this customizability is what you need, try `Blocks` instead!\n\n## Hello, Blocks\n\nLet's take a look at a simple example. Note how the API here differs from `Interface`.\n\n```python\nimport gradio as gr\n\ndef greet(name):\n return \"Hello \" + name + \"!\"\n\nwith gr.Blocks() as demo:\n name = gr.Textbox(label=\"Name\")\n output = gr.Textbox(label=\"Output Box\")\n greet_btn = gr.Button(\"Greet\")\n greet_btn.click(fn=greet, inputs=name, outputs=output, api_name=\"greet\")\n \n\ndemo.launch()\n```\n\n\nThings to note:\n\n- `Blocks` are made with a `with` clause, and any component created inside this clause is automatically added to the app.\n- Components appear vertically in the app in the order they are created. (Later we will cover customizing layouts!)\n- A `Button` was created, and then a `click` event-listener was added to this button. The API for this should look familiar! Like an `Interface`, the `click` method takes a Python function, input components, and output components.\n\n## More Complexity\n\nHere's an app to give you a taste of what's possible with `Blocks`:\n\n```python\nimport numpy as np\nimport gradio as gr\n\n\ndef flip_text(x):\n return x[::-1]\n\n\ndef flip_image(x):\n return np.fliplr(x)\n\n\nwith gr.Blocks() as demo:\n gr.Markdown(\"Flip text or image files using this demo.\")\n with gr.Tab(\"Flip Text\"):\n text_input = gr.Textbox()\n text_output = gr.Textbox()\n text_button = gr.Button(\"Flip\")\n with gr.Tab(\"Flip Image\"):\n with gr.Row():\n image_input = gr.Image()\n image_output = gr.Image()\n image_button = gr.Button(\"Flip\")\n\n with gr.Accordion(\"Open for More!\"):\n gr.Markdown(\"Look at me...\")\n\n text_button.click(flip_text, inputs=text_input, outputs=text_output)\n image_button.click(flip_image, inputs=image_input, outputs=image_output)\n\ndemo.launch()\n\n```\n\n\nA lot more going on here! 
We'll cover how to create complex `Blocks` apps like this in the [building with blocks](https://gradio.app/building_with_blocks) section for you.\n\nCongrats, you're now familiar with the basics of Gradio! \ud83e\udd73 Go to our [next guide](https://gradio.app/key_features) to learn more about the key features of Gradio.\n", "html": "

Quickstart

\n\n

Prerequisite: Gradio requires Python 3.8 or higher, that's all!

\n\n

What Does Gradio Do?

\n\n

One of the best ways to share your machine learning model, API, or data science workflow with others is to create an interactive app that allows your users or colleagues to try out the demo in their browsers.

\n\n

Gradio allows you to build demos and share them, all in Python. And usually in just a few lines of code! So let's get started.

\n\n

Hello, World

\n\n

To get Gradio running with a simple \"Hello, World\" example, follow these three steps:

\n\n

1. Install Gradio using pip:

\n\n
pip install gradio\n
\n\n

2. Run the code below as a Python script or in a Jupyter Notebook (or Google Colab):

\n\n
import gradio as gr\n\ndef greet(name):\n    return \"Hello \" + name + \"!\"\n\ndemo = gr.Interface(fn=greet, inputs=\"text\", outputs=\"text\")\n\ndemo.launch()   \n
\n\n

We shorten the imported name to gr for better readability of code using Gradio. This is a widely adopted convention that you should follow so that anyone working with your code can easily understand it.

\n\n

3. The demo below will appear automatically within the Jupyter Notebook, or pop in a browser on http://localhost:7860 if running from a script:

\n\n

\n\n

When developing locally, if you want to run the code as a Python script, you can use the Gradio CLI to launch the application in reload mode, which will provide seamless and fast development. Learn more about reloading in the Auto-Reloading Guide.

\n\n
gradio app.py\n
\n\n

Note: you can also do python app.py, but it won't provide the automatic reload mechanism.

\n\n

The Interface Class

\n\n

You'll notice that in order to make the demo, we created a gr.Interface. This Interface class can wrap any Python function with a user interface. In the example above, we saw a simple text-based function, but the function could be anything from a music generator to a tax calculator to the prediction function of a pretrained machine learning model.

\n\n

The core Interface class is initialized with three required parameters:

\n\n
    \n
  • fn: the function to wrap a UI around
  • \n
  • inputs: which component(s) to use for the input (e.g. \"text\", \"image\" or \"audio\")
  • \n
  • outputs: which component(s) to use for the output (e.g. \"text\", \"image\" or \"label\")
  • \n
\n\n

Let's take a closer look at these components used to provide input and output.

\n\n

Components Attributes

\n\n

We saw some simple Textbox components in the previous examples, but what if you want to change how the UI components look or behave?

\n\n

Let's say you want to customize the input text field \u2014 for example, you wanted it to be larger and have a text placeholder. If we use the actual class for Textbox instead of using the string shortcut, you have access to much more customizability through component attributes.

\n\n
import gradio as gr\n\ndef greet(name):\n    return \"Hello \" + name + \"!\"\n\ndemo = gr.Interface(\n    fn=greet,\n    inputs=gr.Textbox(lines=2, placeholder=\"Name Here...\"),\n    outputs=\"text\",\n)\ndemo.launch()\n\n
\n\n

\n\n

Multiple Input and Output Components

\n\n

Suppose you had a more complex function, with multiple inputs and outputs. In the example below, we define a function that takes a string, boolean, and number, and returns a string and number. Take a look at how you pass a list of input and output components.

\n\n
import gradio as gr\n\ndef greet(name, is_morning, temperature):\n    salutation = \"Good morning\" if is_morning else \"Good evening\"\n    greeting = f\"{salutation} {name}. It is {temperature} degrees today\"\n    celsius = (temperature - 32) * 5 / 9\n    return greeting, round(celsius, 2)\n\ndemo = gr.Interface(\n    fn=greet,\n    inputs=[\"text\", \"checkbox\", gr.Slider(0, 100)],\n    outputs=[\"text\", \"number\"],\n)\ndemo.launch()\n\n
\n\n

\n\n

You simply wrap the components in a list. Each component in the inputs list corresponds to one of the parameters of the function, in order. Each component in the outputs list corresponds to one of the values returned by the function, again in order.

\n\n

An Image Example

\n\n

Gradio supports many types of components, such as Image, DataFrame, Video, or Label. Let's try an image-to-image function to get a feel for these!

\n\n
import numpy as np\nimport gradio as gr\n\ndef sepia(input_img):\n    sepia_filter = np.array([\n        [0.393, 0.769, 0.189], \n        [0.349, 0.686, 0.168], \n        [0.272, 0.534, 0.131]\n    ])\n    sepia_img = input_img.dot(sepia_filter.T)\n    sepia_img /= sepia_img.max()\n    return sepia_img\n\ndemo = gr.Interface(sepia, gr.Image(shape=(200, 200)), \"image\")\ndemo.launch()\n\n
\n\n

\n\n

When using the Image component as input, your function will receive a NumPy array with the shape (height, width, 3), where the last dimension represents the RGB values. We'll return an image as well in the form of a NumPy array.

\n\n

You can also set the datatype used by the component with the type= keyword argument. For example, if you wanted your function to take a file path to an image instead of a NumPy array, the input Image component could be written as:

\n\n
gr.Image(type=\"filepath\", shape=...)\n
\n\n
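As a sketch of how the function side changes when type="filepath" is used, the example below receives a path string instead of an array; the size-reporting function is just for illustration.

```python
import gradio as gr
from PIL import Image

def image_size(img_path):
    # img_path is a string path on disk, not a NumPy array,
    # because the input component below uses type="filepath".
    with Image.open(img_path) as img:
        width, height = img.size
    return f"{width} x {height} pixels"

demo = gr.Interface(image_size, gr.Image(type="filepath"), "text")
demo.launch()
```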

Also note that our input Image component comes with an edit button \ud83d\udd89, which allows for cropping and zooming into images. Manipulating images in this way can help reveal biases or hidden flaws in a machine learning model!

\n\n

You can read more about the many components and how to use them in the Gradio docs.

\n\n

Chatbots

\n\n

Gradio includes a high-level class, gr.ChatInterface, which is similar to gr.Interface, but is specifically designed for chatbot UIs. The gr.ChatInterface class also wraps a function, but this function must have a specific signature. The function should take two arguments: message and then history (the arguments can be named anything, but must be in this order):

\n\n
    \n
  • message: a str representing the user's input
  • \n
  • history: a list of lists representing the conversation up until that point. Each inner list consists of two str values representing a pair: [user input, bot response].
  • \n
\n\n

Your function should return a single string response, which is the bot's response to the particular user input message.

\n\n

Other than that, gr.ChatInterface has no required parameters (though several are available for customization of the UI).

\n\n

Here's a toy example:

\n\n
import random\nimport gradio as gr\n\ndef random_response(message, history):\n    return random.choice([\"Yes\", \"No\"])\n\ndemo = gr.ChatInterface(random_response)\n\ndemo.launch()\n\n
\n\n

\n\n
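The toy example above ignores history; here is a minimal sketch of a chat function that actually reads it (the turn-counting behaviour is purely illustrative):

```python
import gradio as gr

def count_turns(message, history):
    # history is a list of [user_message, bot_response] pairs from earlier turns.
    previous_turns = len(history)
    return f"You said: {message}. We have already exchanged {previous_turns} turns."

demo = gr.ChatInterface(count_turns)
demo.launch()
```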

You can read more about gr.ChatInterface here.

\n\n

Blocks: More Flexibility and Control

\n\n

Gradio offers two approaches to build apps:

\n\n

1. Interface and ChatInterface, which provide a high-level abstraction for creating demos that we've been discussing so far.

\n\n

2. Blocks, a low-level API for designing web apps with more flexible layouts and data flows. Blocks allows you to do things like feature multiple data flows and demos, control where components appear on the page, handle complex data flows (e.g. outputs can serve as inputs to other functions), and update properties/visibility of components based on user interaction \u2014 still all in Python. If this customizability is what you need, try Blocks instead!

\n\n

Hello, Blocks

\n\n

Let's take a look at a simple example. Note how the API here differs from Interface.

\n\n
import gradio as gr\n\ndef greet(name):\n    return \"Hello \" + name + \"!\"\n\nwith gr.Blocks() as demo:\n    name = gr.Textbox(label=\"Name\")\n    output = gr.Textbox(label=\"Output Box\")\n    greet_btn = gr.Button(\"Greet\")\n    greet_btn.click(fn=greet, inputs=name, outputs=output, api_name=\"greet\")\n\n\ndemo.launch()\n
\n\n

\n\n

Things to note:

\n\n
    \n
  • Blocks are made with a with clause, and any component created inside this clause is automatically added to the app.
  • \n
  • Components appear vertically in the app in the order they are created. (Later we will cover customizing layouts!)
  • \n
  • A Button was created, and then a click event-listener was added to this button. The API for this should look familiar! Like an Interface, the click method takes a Python function, input components, and output components.
  • \n
\n\n

More Complexity

\n\n

Here's an app to give you a taste of what's possible with Blocks:

\n\n
import numpy as np\nimport gradio as gr\n\n\ndef flip_text(x):\n    return x[::-1]\n\n\ndef flip_image(x):\n    return np.fliplr(x)\n\n\nwith gr.Blocks() as demo:\n    gr.Markdown(\"Flip text or image files using this demo.\")\n    with gr.Tab(\"Flip Text\"):\n        text_input = gr.Textbox()\n        text_output = gr.Textbox()\n        text_button = gr.Button(\"Flip\")\n    with gr.Tab(\"Flip Image\"):\n        with gr.Row():\n            image_input = gr.Image()\n            image_output = gr.Image()\n        image_button = gr.Button(\"Flip\")\n\n    with gr.Accordion(\"Open for More!\"):\n        gr.Markdown(\"Look at me...\")\n\n    text_button.click(flip_text, inputs=text_input, outputs=text_output)\n    image_button.click(flip_image, inputs=image_input, outputs=image_output)\n\ndemo.launch()\n\n
\n\n

\n\n

A lot more going on here! We'll cover how to create complex Blocks apps like this in the building with blocks section.

\n\n

Congrats, you're now familiar with the basics of Gradio! \ud83e\udd73 Go to our next guide to learn more about the key features of Gradio.

\n", "tags": [], "spaces": [], "url": "/guides/quickstart/", "contributor": null}, {"name": "key-features", "category": "getting-started", "pretty_category": "Getting Started", "guide_index": 2, "absolute_index": 1, "pretty_name": "Key Features", "content": "# Key Features\n\nLet's go through some of the most popular features of Gradio! Here are Gradio's key features:\n\n1. [Adding example inputs](#example-inputs)\n2. [Passing custom error messages](#alerts)\n3. [Adding descriptive content](#descriptive-content)\n4. [Setting up flagging](#flagging)\n5. [Preprocessing and postprocessing](#preprocessing-and-postprocessing)\n6. [Styling demos](#styling)\n7. [Queuing users](#queuing)\n8. [Iterative outputs](#iterative-outputs)\n9. [Progress bars](#progress-bars)\n10. [Batch functions](#batch-functions)\n11. [Running on collaborative notebooks](#colab-notebooks)\n\n## Example Inputs\n\nYou can provide example data that a user can easily load into `Interface`. This can be helpful to demonstrate the types of inputs the model expects, as well as to provide a way to explore your dataset in conjunction with your model. To load example data, you can provide a **nested list** to the `examples=` keyword argument of the Interface constructor. Each sublist within the outer list represents a data sample, and each element within the sublist represents an input for each input component. The format of example data for each component is specified in the [Docs](https://gradio.app/docs#components).\n\n```python\nimport gradio as gr\n\ndef calculator(num1, operation, num2):\n if operation == \"add\":\n return num1 + num2\n elif operation == \"subtract\":\n return num1 - num2\n elif operation == \"multiply\":\n return num1 * num2\n elif operation == \"divide\":\n if num2 == 0:\n raise gr.Error(\"Cannot divide by zero!\")\n return num1 / num2\n\ndemo = gr.Interface(\n calculator,\n [\n \"number\", \n gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n \"number\"\n ],\n \"number\",\n examples=[\n [5, \"add\", 3],\n [4, \"divide\", 2],\n [-4, \"multiply\", 2.5],\n [0, \"subtract\", 1.2],\n ],\n title=\"Toy Calculator\",\n description=\"Here's a sample toy calculator. Allows you to calculate things like $2+2=4$\",\n)\ndemo.launch()\n\n```\n\n\nYou can load a large dataset into the examples to browse and interact with the dataset through Gradio. The examples will be automatically paginated (you can configure this through the `examples_per_page` argument of `Interface`).\n\nContinue learning about examples in the [More On Examples](https://gradio.app/more-on-examples) guide.\n\n## Alerts\n\nYou wish to pass custom error messages to the user. To do so, raise a `gr.Error(\"custom message\")` to display an error message. If you try to divide by zero in the calculator demo above, a popup modal will display the custom error message. Learn more about Error in the [docs](https://gradio.app/docs#error). \n\nYou can also issue `gr.Warning(\"message\")` and `gr.Info(\"message\")` by having them as standalone lines in your function, which will immediately display modals while continuing the execution of your function. Queueing needs to be enabled for this to work. 
\n\nNote below how the `gr.Error` has to be raised, while the `gr.Warning` and `gr.Info` are single lines.\n\n```python\ndef start_process(name):\n gr.Info(\"Starting process\")\n if name is None:\n gr.Warning(\"Name is empty\")\n ...\n if success == False:\n raise gr.Error(\"Process failed\")\n```\n \n## Descriptive Content\n\nIn the previous example, you may have noticed the `title=` and `description=` keyword arguments in the `Interface` constructor that helps users understand your app.\n\nThere are three arguments in the `Interface` constructor to specify where this content should go:\n\n* `title`: which accepts text and can display it at the very top of interface, and also becomes the page title.\n* `description`: which accepts text, markdown or HTML and places it right under the title.\n* `article`: which also accepts text, markdown or HTML and places it below the interface.\n\n![annotated](https://github.com/gradio-app/gradio/blob/main/guides/assets/annotated.png?raw=true)\n\nIf you're using the `Blocks` API instead, you can insert text, markdown, or HTML anywhere using the `gr.Markdown(...)` or `gr.HTML(...)` components, with descriptive content inside the `Component` constructor.\n\nAnother useful keyword argument is `label=`, which is present in every `Component`. This modifies the label text at the top of each `Component`. You can also add the `info=` keyword argument to form elements like `Textbox` or `Radio` to provide further information on their usage.\n\n```python\ngr.Number(label='Age', info='In years, must be greater than 0')\n```\n\n## Flagging\n\nBy default, an `Interface` will have \"Flag\" button. When a user testing your `Interface` sees input with interesting output, such as erroneous or unexpected model behaviour, they can flag the input for you to review. Within the directory provided by the `flagging_dir=` argument to the `Interface` constructor, a CSV file will log the flagged inputs. If the interface involves file data, such as for Image and Audio components, folders will be created to store those flagged data as well.\n\nFor example, with the calculator interface shown above, we would have the flagged data stored in the flagged directory shown below:\n\n```directory\n+-- calculator.py\n+-- flagged/\n| +-- logs.csv\n```\n\n*flagged/logs.csv*\n\n```csv\nnum1,operation,num2,Output\n5,add,7,12\n6,subtract,1.5,4.5\n```\n\nWith the sepia interface shown earlier, we would have the flagged data stored in the flagged directory shown below:\n\n```directory\n+-- sepia.py\n+-- flagged/\n| +-- logs.csv\n| +-- im/\n| | +-- 0.png\n| | +-- 1.png\n| +-- Output/\n| | +-- 0.png\n| | +-- 1.png\n```\n\n*flagged/logs.csv*\n\n```csv\nim,Output\nim/0.png,Output/0.png\nim/1.png,Output/1.png\n```\n\nIf you wish for the user to provide a reason for flagging, you can pass a list of strings to the `flagging_options` argument of Interface. Users will have to select one of the strings when flagging, which will be saved as an additional column to the CSV.\n\n## Preprocessing and Postprocessing\n\n![](https://github.com/gradio-app/gradio/blob/main/js/_website/src/assets/img/dataflow.svg?raw=true)\n\nAs you've seen, Gradio includes components that can handle a variety of different data types, such as images, audio, and video. 
Most components can be used both as inputs or outputs.\n\nWhen a component is used as an input, Gradio automatically handles the *preprocessing* needed to convert the data from a type sent by the user's browser (such as a base64 representation of a webcam snapshot) to a form that can be accepted by your function (such as a `numpy` array).\n\nSimilarly, when a component is used as an output, Gradio automatically handles the *postprocessing* needed to convert the data from what is returned by your function (such as a list of image paths) to a form that can be displayed in the user's browser (such as a `Gallery` of images in base64 format).\n\nYou can control the *preprocessing* using the parameters when constructing the image component. For example, here if you instantiate the `Image` component with the following parameters, it will convert the image to the `PIL` type and reshape it to be `(100, 100)` no matter the original size that it was submitted as:\n\n```py\nimg = gr.Image(shape=(100, 100), type=\"pil\")\n```\n\nIn contrast, here we keep the original size of the image, but invert the colors before converting it to a numpy array:\n\n```py\nimg = gr.Image(invert_colors=True, type=\"numpy\")\n```\n\nPostprocessing is a lot easier! Gradio automatically recognizes the format of the returned data (e.g. is the `Image` a `numpy` array or a `str` filepath?) and postprocesses it into a format that can be displayed by the browser.\n\nTake a look at the [Docs](https://gradio.app/docs) to see all the preprocessing-related parameters for each Component.\n\n## Styling\n\nGradio themes are the easiest way to customize the look and feel of your app. You can choose from a variety of themes, or create your own. To do so, pass the `theme=` kwarg to the `Interface` constructor. For example:\n\n```python\ndemo = gr.Interface(..., theme=gr.themes.Monochrome())\n```\n\nGradio comes with a set of prebuilt themes which you can load from `gr.themes.*`. You can extend these themes or create your own themes from scratch - see the [Theming guide](https://gradio.app/guides/theming-guide) for more details.\n\nFor additional styling ability, you can pass any CSS to your app using the `css=` kwarg.\nThe base class for the Gradio app is `gradio-container`, so here's an example that changes the background color of the Gradio app:\n\n```python\nwith gr.Interface(css=\".gradio-container {background-color: red}\") as demo:\n ...\n```\n\nSome components can be additionally styled through the `style()` method. For example:\n\n```python\nimg = gr.Image(\"lion.jpg\").style(height='24', rounded=False)\n```\n\nTake a look at the [Docs](https://gradio.app/docs) to see all the styling options for each Component.\n\n## Queuing\n\nIf your app expects heavy traffic, use the `queue()` method to control processing rate. This will queue up calls so only a certain number of requests are processed at a single time. 
Queueing uses websockets, which also prevent network timeouts, so you should use queueing if the inference time of your function is long (> 1min).\n\nWith `Interface`:\n\n```python\ndemo = gr.Interface(...).queue()\ndemo.launch()\n```\n\nWith `Blocks`:\n\n```python\nwith gr.Blocks() as demo:\n #...\ndemo.queue()\ndemo.launch()\n```\n\nYou can control the number of requests processed at a single time as such:\n\n```python\ndemo.queue(concurrency_count=3)\n```\n\nSee the [Docs on queueing](/docs/#queue) on configuring other queuing parameters.\n\nTo specify only certain functions for queueing in Blocks:\n\n```python\nwith gr.Blocks() as demo2:\n num1 = gr.Number()\n num2 = gr.Number()\n output = gr.Number()\n gr.Button(\"Add\").click(\n lambda a, b: a + b, [num1, num2], output)\n gr.Button(\"Multiply\").click(\n lambda a, b: a * b, [num1, num2], output, queue=True)\ndemo2.launch()\n```\n\n## Iterative Outputs\n\nIn some cases, you may want to stream a sequence of outputs rather than show a single output at once. For example, you might have an image generation model and you want to show the image that is generated at each step, leading up to the final image. Or you might have a chatbot which streams its response one word at a time instead of returning it all at once.\n\nIn such cases, you can supply a **generator** function into Gradio instead of a regular function. Creating generators in Python is very simple: instead of a single `return` value, a function should `yield` a series of values instead. Usually the `yield` statement is put in some kind of loop. Here's an example of an generator that simply counts up to a given number:\n\n```python\ndef my_generator(x):\n for i in range(x):\n yield i\n```\n\nYou supply a generator into Gradio the same way as you would a regular function. For example, here's a a (fake) image generation model that generates noise for several steps before outputting an image:\n\n```python\nimport gradio as gr\nimport numpy as np\nimport time\n\n# define core fn, which returns a generator {steps} times before returning the image\ndef fake_diffusion(steps):\n for _ in range(steps):\n time.sleep(1)\n image = np.random.random((600, 600, 3))\n yield image\n image = \"https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg\"\n yield image\n\n\ndemo = gr.Interface(fake_diffusion, inputs=gr.Slider(1, 10, 3), outputs=\"image\")\n\n# define queue - required for generators\ndemo.queue()\n\ndemo.launch()\n\n```\n\n\nNote that we've added a `time.sleep(1)` in the iterator to create an artificial pause between steps so that you are able to observe the steps of the iterator (in a real image generation model, this probably wouldn't be necessary).\n\nSupplying a generator into Gradio **requires** you to enable queuing in the underlying Interface or Blocks (see the queuing section above).\n\n## Progress Bars\n\nGradio supports the ability to create a custom Progress Bars so that you have customizability and control over the progress update that you show to the user. In order to enable this, simply add an argument to your method that has a default value of a `gr.Progress` instance. Then you can update the progress levels by calling this instance directly with a float between 0 and 1, or using the `tqdm()` method of the `Progress` instance to track progress over an iterable, as shown below. 
Queueing must be enabled for progress updates.\n\n```python\nimport gradio as gr\nimport time\n\ndef slowly_reverse(word, progress=gr.Progress()):\n progress(0, desc=\"Starting\")\n time.sleep(1)\n progress(0.05)\n new_string = \"\"\n for letter in progress.tqdm(word, desc=\"Reversing\"):\n time.sleep(0.25)\n new_string = letter + new_string\n return new_string\n\ndemo = gr.Interface(slowly_reverse, gr.Text(), gr.Text())\n\nif __name__ == \"__main__\":\n demo.queue(concurrency_count=10).launch()\n\n```\n\n\nIf you use the `tqdm` library, you can even report progress updates automatically from any `tqdm.tqdm` that already exists within your function by setting the default argument as `gr.Progress(track_tqdm=True)`!\n\n## Batch Functions\n\nGradio supports the ability to pass *batch* functions. Batch functions are just\nfunctions which take in a list of inputs and return a list of predictions.\n\nFor example, here is a batched function that takes in two lists of inputs (a list of\nwords and a list of ints), and returns a list of trimmed words as output:\n\n```py\nimport time\n\ndef trim_words(words, lens):\n trimmed_words = []\n time.sleep(5)\n for w, l in zip(words, lens):\n trimmed_words.append(w[:int(l)]) \n return [trimmed_words]\n```\n\nThe advantage of using batched functions is that if you enable queuing, the Gradio\nserver can automatically *batch* incoming requests and process them in parallel,\npotentially speeding up your demo. Here's what the Gradio code looks like (notice\nthe `batch=True` and `max_batch_size=16` -- both of these parameters can be passed\ninto event triggers or into the `Interface` class)\n\nWith `Interface`:\n\n```python\ndemo = gr.Interface(trim_words, [\"textbox\", \"number\"], [\"output\"], \n batch=True, max_batch_size=16)\ndemo.queue()\ndemo.launch()\n```\n\nWith `Blocks`:\n\n```py\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n with gr.Row():\n word = gr.Textbox(label=\"word\")\n leng = gr.Number(label=\"leng\")\n output = gr.Textbox(label=\"Output\")\n with gr.Row():\n run = gr.Button()\n\n event = run.click(trim_words, [word, leng], output, batch=True, max_batch_size=16)\n\ndemo.queue()\ndemo.launch()\n```\n\nIn the example above, 16 requests could be processed in parallel (for a total inference\ntime of 5 seconds), instead of each request being processed separately (for a total\ninference time of 80 seconds). Many Hugging Face `transformers` and `diffusers` models\nwork very naturally with Gradio's batch mode: here's [an example demo using diffusers to\ngenerate images in batches](https://github.com/gradio-app/gradio/blob/main/demo/diffusers_with_batching/run.py)\n\nNote: using batch functions with Gradio **requires** you to enable queuing in the underlying Interface or Blocks (see the queuing section above).\n\n\n## Colab Notebooks\n\n\nGradio is able to run anywhere you run Python, including local jupyter notebooks as well as collaborative notebooks, such as [Google Colab](https://colab.research.google.com/). In the case of local jupyter notebooks and Google Colab notbooks, Gradio runs on a local server which you can interact with in your browser. (Note: for Google Colab, this is accomplished by [service worker tunneling](https://github.com/tensorflow/tensorboard/blob/master/docs/design/colab_integration.md), which requires cookies to be enabled in your browser.) 
For other remote notebooks, Gradio will also run on a server, but you will need to use [SSH tunneling](https://coderwall.com/p/ohk6cg/remote-access-to-ipython-notebooks-via-ssh) to view the app in your local browser. Often a simpler option is to use Gradio's built-in public links, [discussed in the next Guide](https://gradio.app/guides/sharing-your-app/#sharing-demos). ", "html": "

Key Features

\n\n

Let's go through some of the most popular features of Gradio! Here are Gradio's key features:

\n\n
  1. Adding example inputs
  2. Passing custom error messages
  3. Adding descriptive content
  4. Setting up flagging
  5. Preprocessing and postprocessing
  6. Styling demos
  7. Queuing users
  8. Iterative outputs
  9. Progress bars
  10. Batch functions
  11. Running on collaborative notebooks
\n\n

Example Inputs

\n\n

You can provide example data that a user can easily load into Interface. This can be helpful to demonstrate the types of inputs the model expects, as well as to provide a way to explore your dataset in conjunction with your model. To load example data, you can provide a nested list to the examples= keyword argument of the Interface constructor. Each sublist within the outer list represents a data sample, and each element within the sublist represents an input for each input component. The format of example data for each component is specified in the Docs.

\n\n
import gradio as gr\n\ndef calculator(num1, operation, num2):\n    if operation == \"add\":\n        return num1 + num2\n    elif operation == \"subtract\":\n        return num1 - num2\n    elif operation == \"multiply\":\n        return num1 * num2\n    elif operation == \"divide\":\n        if num2 == 0:\n            raise gr.Error(\"Cannot divide by zero!\")\n        return num1 / num2\n\ndemo = gr.Interface(\n    calculator,\n    [\n        \"number\", \n        gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n        \"number\"\n    ],\n    \"number\",\n    examples=[\n        [5, \"add\", 3],\n        [4, \"divide\", 2],\n        [-4, \"multiply\", 2.5],\n        [0, \"subtract\", 1.2],\n    ],\n    title=\"Toy Calculator\",\n    description=\"Here's a sample toy calculator. Allows you to calculate things like $2+2=4$\",\n)\ndemo.launch()\n\n
\n\n

\n\n

You can load a large dataset into the examples to browse and interact with the dataset through Gradio. The examples will be automatically paginated (you can configure this through the examples_per_page argument of Interface).

\n\n

Continue learning about examples in the More On Examples guide.

\n\n

Alerts

\n\n

You may wish to pass custom error messages to the user. To do so, raise a gr.Error(\"custom message\") to display an error message. If you try to divide by zero in the calculator demo above, a popup modal will display the custom error message. Learn more about Error in the docs.

\n\n

You can also issue gr.Warning(\"message\") and gr.Info(\"message\") by having them as standalone lines in your function, which will immediately display modals while continuing the execution of your function. Queueing needs to be enabled for this to work.

\n\n

Note below how the gr.Error has to be raised, while the gr.Warning and gr.Info are single lines.

\n\n
def start_process(name):\n    gr.Info(\"Starting process\")\n    if name is None:\n        gr.Warning(\"Name is empty\")\n    ...\n    if success == False:\n        raise gr.Error(\"Process failed\")\n
\n\n

Descriptive Content

\n\n

In the previous example, you may have noticed the title= and description= keyword arguments in the Interface constructor that help users understand your app.

\n\n

There are three arguments in the Interface constructor to specify where this content should go:

\n\n
  • title: which accepts text and displays it at the very top of the interface; it also becomes the page title.
  • description: which accepts text, markdown, or HTML and places it right under the title.
  • article: which also accepts text, markdown, or HTML and places it below the interface (see the example below).
\n\n
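To make those three arguments concrete, here is a minimal sketch; the greeting function and the text values are placeholders for illustration, not part of the original guide:

```python
import gradio as gr

demo = gr.Interface(
    lambda name: f"Hello {name}!",  # placeholder function
    "textbox",
    "textbox",
    title="Greeter",                                      # shown at the very top; also the page title
    description="Type a name and get a greeting back.",   # shown right under the title
    article="This text appears below the interface and supports markdown.",
)
demo.launch()
```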

\"annotated\"

\n\n

If you're using the Blocks API instead, you can insert text, markdown, or HTML anywhere using the gr.Markdown(...) or gr.HTML(...) components, with descriptive content inside the Component constructor.

\n\n

Another useful keyword argument is label=, which is present in every Component. This modifies the label text at the top of each Component. You can also add the info= keyword argument to form elements like Textbox or Radio to provide further information on their usage.

\n\n
gr.Number(label='Age', info='In years, must be greater than 0')\n
\n\n

Flagging

\n\n

By default, an Interface will have a \"Flag\" button. When a user testing your Interface sees input with interesting output, such as erroneous or unexpected model behaviour, they can flag the input for you to review. Within the directory provided by the flagging_dir= argument to the Interface constructor, a CSV file will log the flagged inputs. If the interface involves file data, such as for Image and Audio components, folders will be created to store that flagged data as well.

\n\n

For example, with the calculator interface shown above, we would have the flagged data stored in the flagged directory shown below:

\n\n
+-- calculator.py\n+-- flagged/\n|   +-- logs.csv\n
\n\n

flagged/logs.csv

\n\n
num1,operation,num2,Output\n5,add,7,12\n6,subtract,1.5,4.5\n
\n\n

With the sepia interface shown earlier, we would have the flagged data stored in the flagged directory shown below:

\n\n
+-- sepia.py\n+-- flagged/\n|   +-- logs.csv\n|   +-- im/\n|   |   +-- 0.png\n|   |   +-- 1.png\n|   +-- Output/\n|   |   +-- 0.png\n|   |   +-- 1.png\n
\n\n

flagged/logs.csv

\n\n
im,Output\nim/0.png,Output/0.png\nim/1.png,Output/1.png\n
\n\n

If you wish for the user to provide a reason for flagging, you can pass a list of strings to the flagging_options argument of Interface. Users will have to select one of the strings when flagging, which will be saved as an additional column to the CSV.

\n\n
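As a rough sketch of the flagging_options parameter described above (the option strings and the text-reversing function are made up for illustration):

```python
import gradio as gr

demo = gr.Interface(
    lambda text: text[::-1],  # placeholder function
    "textbox",
    "textbox",
    flagging_options=["incorrect", "offensive", "other"],  # the chosen option is saved as an extra column in logs.csv
)
demo.launch()
```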

Preprocessing and Postprocessing

\n\n

\"\"

\n\n

As you've seen, Gradio includes components that can handle a variety of different data types, such as images, audio, and video. Most components can be used as both inputs and outputs.

\n\n

When a component is used as an input, Gradio automatically handles the preprocessing needed to convert the data from a type sent by the user's browser (such as a base64 representation of a webcam snapshot) to a form that can be accepted by your function (such as a numpy array).

\n\n

Similarly, when a component is used as an output, Gradio automatically handles the postprocessing needed to convert the data from what is returned by your function (such as a list of image paths) to a form that can be displayed in the user's browser (such as a Gallery of images in base64 format).

\n\n

You can control the preprocessing using the parameters when constructing the image component. For example, if you instantiate the Image component with the following parameters, it will convert the image to the PIL type and reshape it to (100, 100), no matter the original size it was submitted at:

\n\n
img = gr.Image(shape=(100, 100), type=\"pil\")\n
\n\n

In contrast, here we keep the original size of the image, but invert the colors before converting it to a numpy array:

\n\n
img = gr.Image(invert_colors=True, type=\"numpy\")\n
\n\n

Postprocessing is a lot easier! Gradio automatically recognizes the format of the returned data (e.g. is the Image a numpy array or a str filepath?) and postprocesses it into a format that can be displayed by the browser.

\n\n

Take a look at the Docs to see all the preprocessing-related parameters for each Component.

\n\n

Styling

\n\n

Gradio themes are the easiest way to customize the look and feel of your app. You can choose from a variety of themes, or create your own. To do so, pass the theme= kwarg to the Interface constructor. For example:

\n\n
demo = gr.Interface(..., theme=gr.themes.Monochrome())\n
\n\n

Gradio comes with a set of prebuilt themes which you can load from gr.themes.*. You can extend these themes or create your own themes from scratch - see the Theming guide for more details.

\n\n
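For instance, extending a prebuilt theme might look like the sketch below; the primary_hue value is an assumption about the named colors available in your Gradio version, and the greeting function is a placeholder:

```python
import gradio as gr

theme = gr.themes.Soft(primary_hue="emerald")  # assumed color name; pick any available hue

demo = gr.Interface(lambda name: f"Hello {name}!", "textbox", "textbox", theme=theme)
demo.launch()
```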

For additional styling ability, you can pass any CSS to your app using the css= kwarg.\nThe base class for the Gradio app is gradio-container, so here's an example that changes the background color of the Gradio app:

\n\n
with gr.Interface(css=\".gradio-container {background-color: red}\") as demo:\n    ...\n
\n\n

Some components can be additionally styled through the style() method. For example:

\n\n
img = gr.Image(\"lion.jpg\").style(height='24', rounded=False)\n
\n\n

Take a look at the Docs to see all the styling options for each Component.

\n\n

Queuing

\n\n

If your app expects heavy traffic, use the queue() method to control processing rate. This will queue up calls so only a certain number of requests are processed at a single time. Queueing uses websockets, which also prevent network timeouts, so you should use queueing if the inference time of your function is long (> 1min).

\n\n

With Interface:

\n\n
demo = gr.Interface(...).queue()\ndemo.launch()\n
\n\n

With Blocks:

\n\n
with gr.Blocks() as demo:\n    #...\ndemo.queue()\ndemo.launch()\n
\n\n

You can control the number of requests processed at a single time as such:

\n\n
demo.queue(concurrency_count=3)\n
\n\n

See the Docs on queueing for configuring other queuing parameters.

\n\n

To specify only certain functions for queueing in Blocks:

\n\n
with gr.Blocks() as demo2:\n    num1 = gr.Number()\n    num2 = gr.Number()\n    output = gr.Number()\n    gr.Button(\"Add\").click(\n        lambda a, b: a + b, [num1, num2], output)\n    gr.Button(\"Multiply\").click(\n        lambda a, b: a * b, [num1, num2], output, queue=True)\ndemo2.launch()\n
\n\n

Iterative Outputs

\n\n

In some cases, you may want to stream a sequence of outputs rather than show a single output at once. For example, you might have an image generation model and you want to show the image that is generated at each step, leading up to the final image. Or you might have a chatbot which streams its response one word at a time instead of returning it all at once.

\n\n

In such cases, you can supply a generator function into Gradio instead of a regular function. Creating generators in Python is very simple: instead of a single return value, a function should yield a series of values. Usually the yield statement is put in some kind of loop. Here's an example of a generator that simply counts up to a given number:

\n\n
def my_generator(x):\n    for i in range(x):\n        yield i\n
\n\n

You supply a generator into Gradio the same way as you would a regular function. For example, here's a (fake) image generation model that generates noise for several steps before outputting an image:

\n\n
import gradio as gr\nimport numpy as np\nimport time\n\n# define core fn, which returns a generator {steps} times before returning the image\ndef fake_diffusion(steps):\n    for _ in range(steps):\n        time.sleep(1)\n        image = np.random.random((600, 600, 3))\n        yield image\n    image = \"https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg\"\n    yield image\n\n\ndemo = gr.Interface(fake_diffusion, inputs=gr.Slider(1, 10, 3), outputs=\"image\")\n\n# define queue - required for generators\ndemo.queue()\n\ndemo.launch()\n\n
\n\n

\n\n

Note that we've added a time.sleep(1) in the iterator to create an artificial pause between steps so that you are able to observe the steps of the iterator (in a real image generation model, this probably wouldn't be necessary).

\n\n

Supplying a generator into Gradio requires you to enable queuing in the underlying Interface or Blocks (see the queuing section above).

\n\n

Progress Bars

\n\n

Gradio supports the ability to create custom Progress Bars so that you have customizability and control over the progress updates that you show to the user. In order to enable this, simply add an argument to your method that has a default value of a gr.Progress instance. Then you can update the progress levels by calling this instance directly with a float between 0 and 1, or using the tqdm() method of the Progress instance to track progress over an iterable, as shown below. Queueing must be enabled for progress updates.

\n\n
import gradio as gr\nimport time\n\ndef slowly_reverse(word, progress=gr.Progress()):\n    progress(0, desc=\"Starting\")\n    time.sleep(1)\n    progress(0.05)\n    new_string = \"\"\n    for letter in progress.tqdm(word, desc=\"Reversing\"):\n        time.sleep(0.25)\n        new_string = letter + new_string\n    return new_string\n\ndemo = gr.Interface(slowly_reverse, gr.Text(), gr.Text())\n\nif __name__ == \"__main__\":\n    demo.queue(concurrency_count=10).launch()\n\n
\n\n

\n\n

If you use the tqdm library, you can even report progress updates automatically from any tqdm.tqdm that already exists within your function by setting the default argument as gr.Progress(track_tqdm=True)!

\n\n
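A minimal sketch of track_tqdm, where the loop and sleep stand in for real work (the function and slider range are made up for illustration):

```python
import time
import gradio as gr
from tqdm import tqdm

def train(epochs, progress=gr.Progress(track_tqdm=True)):
    # Any tqdm loop inside the function is mirrored in Gradio's progress bar.
    for _ in tqdm(range(int(epochs)), desc="Training"):
        time.sleep(0.5)
    return "Done"

demo = gr.Interface(train, gr.Slider(1, 20, value=5), "text")
demo.queue().launch()  # queueing is required for progress updates
```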

Batch Functions

\n\n

Gradio supports the ability to pass batch functions. Batch functions are just\nfunctions which take in a list of inputs and return a list of predictions.

\n\n

For example, here is a batched function that takes in two lists of inputs (a list of\nwords and a list of ints), and returns a list of trimmed words as output:

\n\n
import time\n\ndef trim_words(words, lens):\n    trimmed_words = []\n    time.sleep(5)\n    for w, l in zip(words, lens):\n        trimmed_words.append(w[:int(l)])        \n    return [trimmed_words]\n
\n\n

The advantage of using batched functions is that if you enable queuing, the Gradio\nserver can automatically batch incoming requests and process them in parallel,\npotentially speeding up your demo. Here's what the Gradio code looks like (notice\nthe batch=True and max_batch_size=16 -- both of these parameters can be passed\ninto event triggers or into the Interface class)

\n\n

With Interface:

\n\n
demo = gr.Interface(trim_words, [\"textbox\", \"number\"], [\"output\"], \n                    batch=True, max_batch_size=16)\ndemo.queue()\ndemo.launch()\n
\n\n

With Blocks:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        word = gr.Textbox(label=\"word\")\n        leng = gr.Number(label=\"leng\")\n        output = gr.Textbox(label=\"Output\")\n    with gr.Row():\n        run = gr.Button()\n\n    event = run.click(trim_words, [word, leng], output, batch=True, max_batch_size=16)\n\ndemo.queue()\ndemo.launch()\n
\n\n

In the example above, 16 requests could be processed in parallel (for a total inference\ntime of 5 seconds), instead of each request being processed separately (for a total\ninference time of 80 seconds). Many Hugging Face transformers and diffusers models\nwork very naturally with Gradio's batch mode: here's an example demo using diffusers to\ngenerate images in batches

\n\n

Note: using batch functions with Gradio requires you to enable queuing in the underlying Interface or Blocks (see the queuing section above).

\n\n

Colab Notebooks

\n\n

Gradio is able to run anywhere you run Python, including local Jupyter notebooks as well as collaborative notebooks, such as Google Colab. In the case of local Jupyter notebooks and Google Colab notebooks, Gradio runs on a local server which you can interact with in your browser. (Note: for Google Colab, this is accomplished by service worker tunneling, which requires cookies to be enabled in your browser.) For other remote notebooks, Gradio will also run on a server, but you will need to use SSH tunneling to view the app in your local browser. Often a simpler option is to use Gradio's built-in public links, discussed in the next Guide.

\n", "tags": [], "spaces": [], "url": "/guides/key-features/", "contributor": null}, {"name": "sharing-your-app", "category": "getting-started", "pretty_category": "Getting Started", "guide_index": 3, "absolute_index": 2, "pretty_name": "Sharing Your App", "content": "# Sharing Your App\n\nHow to share your Gradio app: \n\n1. [Sharing demos with the share parameter](#sharing-demos)\n2. [Hosting on HF Spaces](#hosting-on-hf-spaces)\n3. [Embedding hosted spaces](#embedding-hosted-spaces)\n4. [Embedding with web components](#embedding-with-web-components)\n5. [Using the API page](#api-page)\n6. [Adding authentication to the page](#authentication)\n7. [Accessing Network Requests](#accessing-the-network-request-directly)\n8. [Mounting within FastAPI](#mounting-within-another-fast-api-app)\n9. [Security](#security-and-file-access)\n\n## Sharing Demos\n\nGradio demos can be easily shared publicly by setting `share=True` in the `launch()` method. Like this:\n\n```python\ndemo.launch(share=True)\n```\n\nThis generates a public, shareable link that you can send to anybody! When you send this link, the user on the other side can try out the model in their browser. Because the processing happens on your device (as long as your device stays on!), you don't have to worry about any packaging any dependencies. A share link usually looks something like this: **XXXXX.gradio.app**. Although the link is served through a Gradio URL, we are only a proxy for your local server, and do not store any data sent through your app.\n\nKeep in mind, however, that these links are publicly accessible, meaning that anyone can use your model for prediction! Therefore, make sure not to expose any sensitive information through the functions you write, or allow any critical changes to occur on your device. If you set `share=False` (the default, except in colab notebooks), only a local link is created, which can be shared by [port-forwarding](https://www.ssh.com/ssh/tunneling/example) with specific users. \n\n![sharing](https://github.com/gradio-app/gradio/blob/main/guides/assets/sharing.svg?raw=true)\n\nShare links expire after 72 hours.\n\n## Hosting on HF Spaces\n\nIf you'd like to have a permanent link to your Gradio demo on the internet, use Hugging Face Spaces. [Hugging Face Spaces](http://huggingface.co/spaces/) provides the infrastructure to permanently host your machine learning model for free! \n\nAfter you have [created a free Hugging Face account](https://huggingface.co/join), you have three methods to deploy your Gradio app to Hugging Face Spaces:\n\n1. From terminal: run `gradio deploy` in your app directory. The CLI will gather some basic metadata and then launch your app. To update your space, you can re-run this command or enable the Github Actions option to automatically update the Spaces on `git push`.\n\n2. From your browser: Drag and drop a folder containing your Gradio model and all related files [here](https://huggingface.co/new-space).\n\n3. Connect Spaces with your Git repository and Spaces will pull the Gradio app from there. See [this guide how to host on Hugging Face Spaces](https://huggingface.co/blog/gradio-spaces) for more information. \n\n\n\nNote: Some components, like `gr.Image`, will display a \"Share\" button only on Spaces, so that users can share the generated output to the Discussions page of the Space easily. You can disable this with `show_share_button`, such as `gr.Image(show_share_button=False)`. 
\n\n![Image with show_share_button=True](https://github.com/gradio-app/gradio/blob/main/guides/assets/share_icon.png?raw=true)\n\n## Embedding Hosted Spaces\n\nOnce you have hosted your app on Hugging Face Spaces (or on your own server), you may want to embed the demo on a different website, such as your blog or your portfolio. Embedding an interactive demo allows people to try out the machine learning model that you have built, without needing to download or install anything \u2014 right in their browser! The best part is that you can embed interactive demos even in static websites, such as GitHub pages.\n\nThere are two ways to embed your Gradio demos. You can find quick links to both options directly on the Hugging Face Space page, in the \"Embed this Space\" dropdown option:\n\n![Embed this Space dropdown option](https://github.com/gradio-app/gradio/blob/main/guides/assets/embed_this_space.png?raw=true)\n\n### Embedding with Web Components\n\nWeb components typically offer a better experience to users than IFrames. Web components load lazily, meaning that they won't slow down the loading time of your website, and they automatically adjust their height based on the size of the Gradio app. \n\nTo embed with Web Components:\n\n1. Import the gradio JS library into into your site by adding the script below in your site (replace {GRADIO_VERSION} in the URL with the library version of Gradio you are using). \n\n```html\n\n```\n\n2. Add \n```html\n\n```\n\nelement where you want to place the app. Set the `src=` attribute to your Space's embed URL, which you can find in the \"Embed this Space\" button. For example:\n\n\n```html\n\n```\n\n\n\nYou can see examples of how web components look on the Gradio landing page.\n\nYou can also customize the appearance and behavior of your web component with attributes that you pass into the `` tag:\n\n* `src`: as we've seen, the `src` attributes links to the URL of the hosted Gradio demo that you would like to embed\n* `space`: an optional shorthand if your Gradio demo is hosted on Hugging Face Space. Accepts a `username/space_name` instead of a full URL. Example: `gradio/Echocardiogram-Segmentation`. If this attribute attribute is provided, then `src` does not need to be provided.\n* `control_page_title`: a boolean designating whether the html title of the page should be set to the title of the Gradio app (by default `\"false\"`)\n* `initial_height`: the initial height of the web component while it is loading the Gradio app, (by default `\"300px\"`). Note that the final height is set based on the size of the Gradio app.\n* `container`: whether to show the border frame and information about where the Space is hosted (by default `\"true\"`)\n* `info`: whether to show just the information about where the Space is hosted underneath the embedded app (by default `\"true\"`)\n* `autoscroll`: whether to autoscroll to the output when prediction has finished (by default `\"false\"`)\n* `eager`: whether to load the Gradio app as soon as the page loads (by default `\"false\"`)\n* `theme_mode`: whether to use the `dark`, `light`, or default `system` theme mode (by default `\"system\"`)\n\nHere's an example of how to use these attributes to create a Gradio app that does not lazy load and has an initial height of 0px. \n\n```html\n\n```\n\n_Note: While Gradio's CSS will never impact the embedding page, the embedding page can affect the style of the embedded Gradio app. 
Make sure that any CSS in the parent page isn't so general that it could also apply to the embedded Gradio app and cause the styling to break. Element selectors such as `header { ... }` and `footer { ... }` will be the most likely to cause issues._\n\n### Embedding with IFrames\n\nTo embed with IFrames instead (if you cannot add javascript to your website, for example), add this element:\n\n```html\n\n```\n\nAgain, you can find the `src=` attribute to your Space's embed URL, which you can find in the \"Embed this Space\" button.\n\nNote: if you use IFrames, you'll probably want to add a fixed `height` attribute and set `style=\"border:0;\"` to remove the boreder. In addition, if your app requires permissions such as access to the webcam or the microphone, you'll need to provide that as well using the `allow` attribute.\n\n## API Page\n\nYou can use almost any Gradio app as an API! In the footer of a Gradio app [like this one](https://huggingface.co/spaces/gradio/hello_world), you'll see a \"Use via API\" link. \n\n![Use via API](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/gradio-guides/api3.gif)\n\nThis is a page that lists the endpoints that can be used to query the Gradio app, via our supported clients: either [the Python client](https://gradio.app/guides/getting-started-with-the-python-client/), or [the JavaScript client](https://gradio.app/guides/getting-started-with-the-js-client/). For each endpoint, Gradio automatically generates the parameters and their types, as well as example inputs.\n\nThe endpoints are automatically created when you launch a Gradio `Interface`. If you are using Gradio `Blocks`, you can also set up a Gradio API page, though we recommend that you explicitly name each event listener, such as\n\n```python\nbtn.click(add, [num1, num2], output, api_name=\"addition\")\n```\n\nThis will add and document the endpoint `/api/addition/` to the automatically generated API page. Otherwise, your API endpoints will appear as \"unnamed\" endpoints. \n\n*Note*: For Gradio apps in which [queueing is enabled](https://gradio.app/guides/key-features#queuing), users can bypass the queue if they make a POST request to your API endpoint. To disable this behavior, set `api_open=False` in the `queue()` method. To disable the API page altogether, set `show_api=False` in `.launch()`.\n\n## Authentication\n\nYou may wish to put an authentication page in front of your app to limit who can open your app. With the `auth=` keyword argument in the `launch()` method, you can provide a tuple with a username and password, or a list of acceptable username/password tuples; Here's an example that provides password-based authentication for a single user named \"admin\":\n\n```python\ndemo.launch(auth=(\"admin\", \"pass1234\"))\n```\n\nFor more complex authentication handling, you can even pass a function that takes a username and password as arguments, and returns True to allow authentication, False otherwise. 
This can be used for, among other things, making requests to 3rd-party authentication services.\n\nHere's an example of a function that accepts any login where the username and password are the same:\n\n```python\ndef same_auth(username, password):\n return username == password\ndemo.launch(auth=same_auth)\n```\n\nFor authentication to work properly, third party cookies must be enabled in your browser.\nThis is not the case by default for Safari, Chrome Incognito Mode.\n\n## Accessing the Network Request Directly\n\nWhen a user makes a prediction to your app, you may need the underlying network request, in order to get the request headers (e.g. for advanced authentication), log the client's IP address, or for other reasons. Gradio supports this in a similar manner to FastAPI: simply add a function parameter whose type hint is `gr.Request` and Gradio will pass in the network request as that parameter. Here is an example:\n\n```python\nimport gradio as gr\n\ndef echo(name, request: gr.Request):\n if request:\n print(\"Request headers dictionary:\", request.headers)\n print(\"IP address:\", request.client.host)\n return name\n\nio = gr.Interface(echo, \"textbox\", \"textbox\").launch()\n```\n\nNote: if your function is called directly instead of through the UI (this happens, for \nexample, when examples are cached), then `request` will be `None`. You should handle\nthis case explicitly to ensure that your app does not throw any errors. That is why\nwe have the explicit check `if request`.\n\n## Mounting Within Another FastAPI App\n\nIn some cases, you might have an existing FastAPI app, and you'd like to add a path for a Gradio demo.\nYou can easily do this with `gradio.mount_gradio_app()`.\n\nHere's a complete example:\n\n```python\nfrom fastapi import FastAPI\nimport gradio as gr\n\nCUSTOM_PATH = \"/gradio\"\n\napp = FastAPI()\n\n\n@app.get(\"/\")\ndef read_main():\n return {\"message\": \"This is your main app\"}\n\n\nio = gr.Interface(lambda x: \"Hello, \" + x + \"!\", \"textbox\", \"textbox\")\napp = gr.mount_gradio_app(app, io, path=CUSTOM_PATH)\n\n\n# Run this from the terminal as you would normally start a FastAPI app: `uvicorn run:app`\n# and navigate to http://localhost:8000/gradio in your browser.\n\n```\n\nNote that this approach also allows you run your Gradio apps on custom paths (`http://localhost:8000/gradio` in the example above).\n\n## Security and File Access\n\nSharing your Gradio app with others (by hosting it on Spaces, on your own server, or through temporary share links) **exposes** certain files on the host machine to users of your Gradio app. \n\nIn particular, Gradio apps ALLOW users to access to three kinds of files:\n\n* **Files in the same directory (or a subdirectory) of where the Gradio script is launched from.** For example, if the path to your gradio scripts is `/home/usr/scripts/project/app.py` and you launch it from `/home/usr/scripts/project/`, then users of your shared Gradio app will be able to access any files inside `/home/usr/scripts/project/`. This is done so that you can easily reference these files in your Gradio app (e.g. for your app's `examples`).\n\n* **Temporary files created by Gradio.** These are files that are created by Gradio as part of running your prediction function. For example, if your prediction function returns a video file, then Gradio will save that video to a temporary file and then send the path to the temporary file to the front end. 
You can customize the location of temporary files created by Gradio by setting the environment variable `GRADIO_TEMP_DIR` to an absolute path, such as `/home/usr/scripts/project/temp/`.\n\n* **Files that you explicitly allow via the `allowed_paths` parameter in `launch()`**. This parameter allows you to pass in a list of additional directories or exact filepaths you'd like to allow users to have access to. (By default, this parameter is an empty list).\n\nGradio DOES NOT ALLOW access to:\n\n* **Dotfiles** (any files whose name begins with `'.'`) or any files that are contained in any directory whose name begins with `'.'`\n\n* **Files that you explicitly allow via the `blocked_paths` parameter in `launch()`**. You can pass in a list of additional directories or exact filepaths to the `blocked_paths` parameter in `launch()`. This parameter takes precedence over the files that Gradio exposes by default or by the `allowed_paths`.\n\n* **Any other paths on the host machine**. Users should NOT be able to access other arbitrary paths on the host. \n\nPlease make sure you are running the latest version of `gradio` for these security settings to apply. ", "html": "

Sharing Your App

\n\n

How to share your Gradio app:

\n\n
  1. Sharing demos with the share parameter
  2. Hosting on HF Spaces
  3. Embedding hosted spaces
  4. Embedding with web components
  5. Using the API page
  6. Adding authentication to the page
  7. Accessing Network Requests
  8. Mounting within FastAPI
  9. Security
\n\n

Sharing Demos

\n\n

Gradio demos can be easily shared publicly by setting share=True in the launch() method. Like this:

\n\n
demo.launch(share=True)\n
\n\n

This generates a public, shareable link that you can send to anybody! When you send this link, the user on the other side can try out the model in their browser. Because the processing happens on your device (as long as your device stays on!), you don't have to worry about packaging any dependencies. A share link usually looks something like this: XXXXX.gradio.app. Although the link is served through a Gradio URL, we are only a proxy for your local server, and do not store any data sent through your app.

\n\n

Keep in mind, however, that these links are publicly accessible, meaning that anyone can use your model for prediction! Therefore, make sure not to expose any sensitive information through the functions you write, or allow any critical changes to occur on your device. If you set share=False (the default, except in colab notebooks), only a local link is created, which can be shared by port-forwarding with specific users.

\n\n

\"sharing\"

\n\n

Share links expire after 72 hours.

\n\n

Hosting on HF Spaces

\n\n

If you'd like to have a permanent link to your Gradio demo on the internet, use Hugging Face Spaces. Hugging Face Spaces provides the infrastructure to permanently host your machine learning model for free!

\n\n

After you have created a free Hugging Face account, you have three methods to deploy your Gradio app to Hugging Face Spaces:

\n\n
  1. From terminal: run gradio deploy in your app directory. The CLI will gather some basic metadata and then launch your app. To update your space, you can re-run this command or enable the GitHub Actions option to automatically update the Space on git push.

  2. From your browser: Drag and drop a folder containing your Gradio model and all related files here.

  3. Connect Spaces with your Git repository and Spaces will pull the Gradio app from there. See this guide on how to host on Hugging Face Spaces for more information.
\n\n

\n\n

Note: Some components, like gr.Image, will display a \"Share\" button only on Spaces, so that users can share the generated output to the Discussions page of the Space easily. You can disable this with show_share_button, such as gr.Image(show_share_button=False).

\n\n

\"Imagesharebutton=True\" />

\n\n

Embedding Hosted Spaces

\n\n

Once you have hosted your app on Hugging Face Spaces (or on your own server), you may want to embed the demo on a different website, such as your blog or your portfolio. Embedding an interactive demo allows people to try out the machine learning model that you have built, without needing to download or install anything \u2014 right in their browser! The best part is that you can embed interactive demos even in static websites, such as GitHub pages.

\n\n

There are two ways to embed your Gradio demos. You can find quick links to both options directly on the Hugging Face Space page, in the \"Embed this Space\" dropdown option:

\n\n

\"Embed

\n\n

Embedding with Web Components

\n\n

Web components typically offer a better experience to users than IFrames. Web components load lazily, meaning that they won't slow down the loading time of your website, and they automatically adjust their height based on the size of the Gradio app.

\n\n

To embed with Web Components:

\n\n
  1. Import the gradio JS library into your site by adding the script below (replace {GRADIO_VERSION} in the URL with the library version of Gradio you are using).
\n\n
\n
\n\n
  2. Add a <gradio-app> element where you want to place the app. Set the src= attribute to your Space's embed URL, which you can find in the \"Embed this Space\" button. For example:

\n\n
\n
\n\n\n\n

You can see examples of how web components look on the Gradio landing page.

\n\n

You can also customize the appearance and behavior of your web component with attributes that you pass into the <gradio-app> tag:

\n\n
  • src: as we've seen, the src attribute links to the URL of the hosted Gradio demo that you would like to embed
  • space: an optional shorthand if your Gradio demo is hosted on Hugging Face Spaces. Accepts a username/space_name instead of a full URL. Example: gradio/Echocardiogram-Segmentation. If this attribute is provided, then src does not need to be provided.
  • control_page_title: a boolean designating whether the html title of the page should be set to the title of the Gradio app (by default \"false\")
  • initial_height: the initial height of the web component while it is loading the Gradio app (by default \"300px\"). Note that the final height is set based on the size of the Gradio app.
  • container: whether to show the border frame and information about where the Space is hosted (by default \"true\")
  • info: whether to show just the information about where the Space is hosted underneath the embedded app (by default \"true\")
  • autoscroll: whether to autoscroll to the output when prediction has finished (by default \"false\")
  • eager: whether to load the Gradio app as soon as the page loads (by default \"false\")
  • theme_mode: whether to use the dark, light, or default system theme mode (by default \"system\")
\n\n

Here's an example of how to use these attributes to create a Gradio app that does not lazy load and has an initial height of 0px.

\n\n
\n
\n\n

Note: While Gradio's CSS will never impact the embedding page, the embedding page can affect the style of the embedded Gradio app. Make sure that any CSS in the parent page isn't so general that it could also apply to the embedded Gradio app and cause the styling to break. Element selectors such as header { ... } and footer { ... } will be the most likely to cause issues.

\n\n

Embedding with IFrames

\n\n

To embed with IFrames instead (if you cannot add javascript to your website, for example), add this element:

\n\n
\n
\n\n

Again, set the src= attribute to your Space's embed URL, which you can find in the \"Embed this Space\" button.

\n\n

Note: if you use IFrames, you'll probably want to add a fixed height attribute and set style=\"border:0;\" to remove the border. In addition, if your app requires permissions such as access to the webcam or the microphone, you'll need to provide that as well using the allow attribute.

\n\n

API Page

\n\n

You can use almost any Gradio app as an API! In the footer of a Gradio app like this one, you'll see a \"Use via API\" link.

\n\n

\"Use

\n\n

This is a page that lists the endpoints that can be used to query the Gradio app, via our supported clients: either the Python client, or the JavaScript client. For each endpoint, Gradio automatically generates the parameters and their types, as well as example inputs.

\n\n

The endpoints are automatically created when you launch a Gradio Interface. If you are using Gradio Blocks, you can also set up a Gradio API page, though we recommend that you explicitly name each event listener, such as

\n\n
btn.click(add, [num1, num2], output, api_name=\"addition\")\n
\n\n

This will add and document the endpoint /api/addition/ to the automatically generated API page. Otherwise, your API endpoints will appear as \"unnamed\" endpoints.

\n\n

Note: For Gradio apps in which queueing is enabled, users can bypass the queue if they make a POST request to your API endpoint. To disable this behavior, set api_open=False in the queue() method. To disable the API page altogether, set show_api=False in .launch().

\n\n
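A minimal sketch combining the two settings mentioned above (the echo function is a placeholder):

```python
import gradio as gr

demo = gr.Interface(lambda x: x, "textbox", "textbox")  # placeholder echo app
demo.queue(api_open=False)   # API requests can no longer bypass the queue
demo.launch(show_api=False)  # hides the "Use via API" page entirely
```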

Authentication

\n\n

You may wish to put an authentication page in front of your app to limit who can open your app. With the auth= keyword argument in the launch() method, you can provide a tuple with a username and password, or a list of acceptable username/password tuples. Here's an example that provides password-based authentication for a single user named \"admin\":

\n\n
demo.launch(auth=(\"admin\", \"pass1234\"))\n
\n\n
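For multiple accounts, a list of tuples can be passed instead; the credentials and the greeting function below are placeholders for illustration:

```python
import gradio as gr

demo = gr.Interface(lambda name: f"Hello {name}!", "textbox", "textbox")
# Any of the listed username/password pairs is accepted (placeholder credentials).
demo.launch(auth=[("admin", "pass1234"), ("guest", "guest")])
```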

For more complex authentication handling, you can even pass a function that takes a username and password as arguments, and returns True to allow authentication, False otherwise. This can be used for, among other things, making requests to 3rd-party authentication services.

\n\n

Here's an example of a function that accepts any login where the username and password are the same:

\n\n
def same_auth(username, password):\n    return username == password\ndemo.launch(auth=same_auth)\n
\n\n

For authentication to work properly, third party cookies must be enabled in your browser.\nThis is not the case by default for Safari or Chrome Incognito Mode.

\n\n

Accessing the Network Request Directly

\n\n

When a user makes a prediction to your app, you may need the underlying network request, in order to get the request headers (e.g. for advanced authentication), log the client's IP address, or for other reasons. Gradio supports this in a similar manner to FastAPI: simply add a function parameter whose type hint is gr.Request and Gradio will pass in the network request as that parameter. Here is an example:

\n\n
import gradio as gr\n\ndef echo(name, request: gr.Request):\n    if request:\n        print(\"Request headers dictionary:\", request.headers)\n        print(\"IP address:\", request.client.host)\n    return name\n\nio = gr.Interface(echo, \"textbox\", \"textbox\").launch()\n
\n\n

Note: if your function is called directly instead of through the UI (this happens, for \nexample, when examples are cached), then request will be None. You should handle\nthis case explicitly to ensure that your app does not throw any errors. That is why\nwe have the explicit check if request.

\n\n

Mounting Within Another FastAPI App

\n\n

In some cases, you might have an existing FastAPI app, and you'd like to add a path for a Gradio demo.\nYou can easily do this with gradio.mount_gradio_app().

\n\n

Here's a complete example:

\n\n
from fastapi import FastAPI\nimport gradio as gr\n\nCUSTOM_PATH = \"/gradio\"\n\napp = FastAPI()\n\n\n@app.get(\"/\")\ndef read_main():\n    return {\"message\": \"This is your main app\"}\n\n\nio = gr.Interface(lambda x: \"Hello, \" + x + \"!\", \"textbox\", \"textbox\")\napp = gr.mount_gradio_app(app, io, path=CUSTOM_PATH)\n\n\n# Run this from the terminal as you would normally start a FastAPI app: `uvicorn run:app`\n# and navigate to http://localhost:8000/gradio in your browser.\n\n
\n\n

Note that this approach also allows you to run your Gradio apps on custom paths (http://localhost:8000/gradio in the example above).

\n\n

Security and File Access

\n\n

Sharing your Gradio app with others (by hosting it on Spaces, on your own server, or through temporary share links) exposes certain files on the host machine to users of your Gradio app.

\n\n

In particular, Gradio apps ALLOW users to access three kinds of files:

\n\n
  • Files in the same directory (or a subdirectory) of where the Gradio script is launched from. For example, if the path to your gradio scripts is /home/usr/scripts/project/app.py and you launch it from /home/usr/scripts/project/, then users of your shared Gradio app will be able to access any files inside /home/usr/scripts/project/. This is done so that you can easily reference these files in your Gradio app (e.g. for your app's examples).

  • Temporary files created by Gradio. These are files that are created by Gradio as part of running your prediction function. For example, if your prediction function returns a video file, then Gradio will save that video to a temporary file and then send the path to the temporary file to the front end. You can customize the location of temporary files created by Gradio by setting the environment variable GRADIO_TEMP_DIR to an absolute path, such as /home/usr/scripts/project/temp/.

  • Files that you explicitly allow via the allowed_paths parameter in launch(). This parameter allows you to pass in a list of additional directories or exact filepaths you'd like to allow users to have access to. (By default, this parameter is an empty list).
\n\n

Gradio DOES NOT ALLOW access to:

\n\n
  • Dotfiles (any files whose name begins with '.') or any files that are contained in any directory whose name begins with '.'

  • Files that you explicitly block via the blocked_paths parameter in launch(). You can pass in a list of additional directories or exact filepaths to the blocked_paths parameter in launch(). This parameter takes precedence over the files that Gradio exposes by default or by the allowed_paths.

  • Any other paths on the host machine. Users should NOT be able to access other arbitrary paths on the host.
\n\n
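As a sketch of how the allowed_paths and blocked_paths parameters fit together (the directory names and the greeting function are hypothetical):

```python
import gradio as gr

demo = gr.Interface(lambda name: f"Hello {name}!", "textbox", "textbox")
demo.launch(
    allowed_paths=["/home/usr/scripts/project/assets"],          # hypothetical extra directory to expose
    blocked_paths=["/home/usr/scripts/project/assets/private"],  # takes precedence over allowed_paths
)
```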

Please make sure you are running the latest version of gradio for these security settings to apply.

\n", "tags": [], "spaces": [], "url": "/guides/sharing-your-app/", "contributor": null}]}, {"category": "Building Interfaces", "guides": [{"name": "interface-state", "category": "building-interfaces", "pretty_category": "Building Interfaces", "guide_index": 1, "absolute_index": 3, "pretty_name": "Interface State", "content": "# Interface State\n\nThis guide covers how State is handled in Gradio. Learn the difference between Global and Session states, and how to use both.\n\n## Global State\n\nYour function may use data that persists beyond a single function call. If the data is something accessible to all function calls and all users, you can create a variable outside the function call and access it inside the function. For example, you may load a large model outside the function and use it inside the function so that every function call does not need to reload the model. \n\n```python\nimport gradio as gr\n\nscores = []\n\ndef track_score(score):\n scores.append(score)\n top_scores = sorted(scores, reverse=True)[:3]\n return top_scores\n\ndemo = gr.Interface(\n track_score, \n gr.Number(label=\"Score\"), \n gr.JSON(label=\"Top Scores\")\n)\ndemo.launch()\n```\n\nIn the code above, the `scores` array is shared between all users. If multiple users are accessing this demo, their scores will all be added to the same list, and the returned top 3 scores will be collected from this shared reference. \n\n## Session State\n\nAnother type of data persistence Gradio supports is session **state**, where data persists across multiple submits within a page session. However, data is *not* shared between different users of your model. To store data in a session state, you need to do three things:\n\n1. Pass in an extra parameter into your function, which represents the state of the interface.\n2. At the end of the function, return the updated value of the state as an extra return value.\n3. Add the `'state'` input and `'state'` output components when creating your `Interface`\n\nA chatbot is an example where you would need session state - you want access to a users previous submissions, but you cannot store chat history in a global variable, because then chat history would get jumbled between different users. 
\n\n```python\nimport gradio as gr\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\nimport torch\n\ntokenizer = AutoTokenizer.from_pretrained(\"microsoft/DialoGPT-medium\")\nmodel = AutoModelForCausalLM.from_pretrained(\"microsoft/DialoGPT-medium\")\n\n\ndef user(message, history):\n return \"\", history + [[message, None]]\n\n\ndef bot(history):\n user_message = history[-1][0]\n new_user_input_ids = tokenizer.encode(\n user_message + tokenizer.eos_token, return_tensors=\"pt\"\n )\n\n # append the new user input tokens to the chat history\n bot_input_ids = torch.cat([torch.LongTensor([]), new_user_input_ids], dim=-1)\n\n # generate a response\n response = model.generate(\n bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id\n ).tolist()\n\n # convert the tokens to text, and then split the responses into lines\n response = tokenizer.decode(response[0]).split(\"<|endoftext|>\")\n response = [\n (response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)\n ] # convert to tuples of list\n history[-1] = response[0]\n return history\n\n\nwith gr.Blocks() as demo:\n chatbot = gr.Chatbot()\n msg = gr.Textbox()\n clear = gr.Button(\"Clear\")\n\n msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(\n bot, chatbot, chatbot\n )\n clear.click(lambda: None, None, chatbot, queue=False)\n\ndemo.launch()\n\n```\n\n\nNotice how the state persists across submits within each page, but if you load this demo in another tab (or refresh the page), the demos will not share chat history. \n\nThe default value of `state` is None. If you pass a default value to the state parameter of the function, it is used as the default value of the state instead. The `Interface` class only supports a single input and outputs state variable, though it can be a list with multiple elements. For more complex use cases, you can use Blocks, [which supports multiple `State` variables](/guides/state-in-blocks/).", "html": "

Interface State

\n\n

This guide covers how State is handled in Gradio. Learn the difference between Global and Session states, and how to use both.

\n\n

Global State

\n\n

Your function may use data that persists beyond a single function call. If the data is something accessible to all function calls and all users, you can create a variable outside the function call and access it inside the function. For example, you may load a large model outside the function and use it inside the function so that every function call does not need to reload the model.

\n\n
import gradio as gr\n\nscores = []\n\ndef track_score(score):\n    scores.append(score)\n    top_scores = sorted(scores, reverse=True)[:3]\n    return top_scores\n\ndemo = gr.Interface(\n    track_score, \n    gr.Number(label=\"Score\"), \n    gr.JSON(label=\"Top Scores\")\n)\ndemo.launch()\n
\n\n

In the code above, the scores array is shared between all users. If multiple users are accessing this demo, their scores will all be added to the same list, and the returned top 3 scores will be collected from this shared reference.

\n\n

Session State

\n\n

Another type of data persistence Gradio supports is session state, where data persists across multiple submits within a page session. However, data is not shared between different users of your model. To store data in a session state, you need to do three things:

\n\n
  1. Pass an extra parameter into your function, which represents the state of the interface.
  2. At the end of the function, return the updated value of the state as an extra return value.
  3. Add the 'state' input and 'state' output components when creating your Interface (see the sketch below).
\n\n
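Before the chatbot example below, here is a minimal sketch of those three steps using the 'state' shortcut components; the message-logging function is made up for illustration:

```python
import gradio as gr

def store_message(message, history):
    history = history or []   # the state defaults to None on the first call
    history.append(message)
    return history, history    # regular output first, then the updated state

demo = gr.Interface(
    store_message,
    ["textbox", "state"],   # the extra 'state' input feeds the history parameter
    ["json", "state"],      # the extra 'state' output receives the updated history
)
demo.launch()
```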

A chatbot is an example where you would need session state - you want access to a user's previous submissions, but you cannot store chat history in a global variable, because then chat history would get jumbled between different users.

\n\n
import gradio as gr\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\nimport torch\n\ntokenizer = AutoTokenizer.from_pretrained(\"microsoft/DialoGPT-medium\")\nmodel = AutoModelForCausalLM.from_pretrained(\"microsoft/DialoGPT-medium\")\n\n\ndef user(message, history):\n    return \"\", history + [[message, None]]\n\n\ndef bot(history):\n    user_message = history[-1][0]\n    new_user_input_ids = tokenizer.encode(\n        user_message + tokenizer.eos_token, return_tensors=\"pt\"\n    )\n\n    # append the new user input tokens to the chat history\n    bot_input_ids = torch.cat([torch.LongTensor([]), new_user_input_ids], dim=-1)\n\n    # generate a response\n    response = model.generate(\n        bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id\n    ).tolist()\n\n    # convert the tokens to text, and then split the responses into lines\n    response = tokenizer.decode(response[0]).split(\"<|endoftext|>\")\n    response = [\n        (response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)\n    ]  # convert to tuples of list\n    history[-1] = response[0]\n    return history\n\n\nwith gr.Blocks() as demo:\n    chatbot = gr.Chatbot()\n    msg = gr.Textbox()\n    clear = gr.Button(\"Clear\")\n\n    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(\n        bot, chatbot, chatbot\n    )\n    clear.click(lambda: None, None, chatbot, queue=False)\n\ndemo.launch()\n\n
\n\n

\n\n

Notice how the state persists across submits within each page, but if you load this demo in another tab (or refresh the page), the demos will not share chat history.

\n\n

The default value of state is None. If you pass a default value to the state parameter of the function, it is used as the default value of the state instead. The Interface class only supports a single input and a single output state variable, though it can be a list with multiple elements. For more complex use cases, you can use Blocks, which supports multiple State variables.

\n", "tags": [], "spaces": [], "url": "/guides/interface-state/", "contributor": null}, {"name": "reactive-interfaces", "category": "building-interfaces", "pretty_category": "Building Interfaces", "guide_index": 2, "absolute_index": 4, "pretty_name": "Reactive Interfaces", "content": "# Reactive Interfaces\n\nThis guide covers how to get Gradio interfaces to refresh automatically or continuously stream data.\n\n## Live Interfaces\n\nYou can make interfaces automatically refresh by setting `live=True` in the interface. Now the interface will recalculate as soon as the user input changes.\n\n```python\nimport gradio as gr\n\ndef calculator(num1, operation, num2):\n if operation == \"add\":\n return num1 + num2\n elif operation == \"subtract\":\n return num1 - num2\n elif operation == \"multiply\":\n return num1 * num2\n elif operation == \"divide\":\n return num1 / num2\n\ndemo = gr.Interface(\n calculator,\n [\n \"number\",\n gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n \"number\"\n ],\n \"number\",\n live=True,\n)\ndemo.launch()\n\n```\n\n\nNote there is no submit button, because the interface resubmits automatically on change.\n\n## Streaming Components\n\nSome components have a \"streaming\" mode, such as `Audio` component in microphone mode, or the `Image` component in webcam mode. Streaming means data is sent continuously to the backend and the `Interface` function is continuously being rerun. \n\nThe difference between `gr.Audio(source='microphone')` and `gr.Audio(source='microphone', streaming=True)`, when both are used in `gr.Interface(live=True)`, is that the first `Component` will automatically submit data and run the `Interface` function when the user stops recording, whereas the second `Component` will continuously send data and run the `Interface` function *during* recording.\n\nHere is example code of streaming images from the webcam.\n\n```python\nimport gradio as gr\nimport numpy as np\n\ndef flip(im):\n return np.flipud(im)\n\ndemo = gr.Interface(\n flip, \n gr.Image(source=\"webcam\", streaming=True), \n \"image\",\n live=True\n)\ndemo.launch()\n \n```", "html": "

Reactive Interfaces

\n\n

This guide covers how to get Gradio interfaces to refresh automatically or continuously stream data.

\n\n

Live Interfaces

\n\n

You can make interfaces automatically refresh by setting live=True in the interface. Now the interface will recalculate as soon as the user input changes.

\n\n
import gradio as gr\n\ndef calculator(num1, operation, num2):\n    if operation == \"add\":\n        return num1 + num2\n    elif operation == \"subtract\":\n        return num1 - num2\n    elif operation == \"multiply\":\n        return num1 * num2\n    elif operation == \"divide\":\n        return num1 / num2\n\ndemo = gr.Interface(\n    calculator,\n    [\n        \"number\",\n        gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n        \"number\"\n    ],\n    \"number\",\n    live=True,\n)\ndemo.launch()\n\n
\n\n

\n\n

Note there is no submit button, because the interface resubmits automatically on change.

\n\n

Streaming Components

\n\n

Some components have a \"streaming\" mode, such as Audio component in microphone mode, or the Image component in webcam mode. Streaming means data is sent continuously to the backend and the Interface function is continuously being rerun.

\n\n

The difference between gr.Audio(source='microphone') and gr.Audio(source='microphone', streaming=True), when both are used in gr.Interface(live=True), is that the first Component will automatically submit data and run the Interface function when the user stops recording, whereas the second Component will continuously send data and run the Interface function during recording.

\n\n

Here is example code of streaming images from the webcam.

\n\n
import gradio as gr\nimport numpy as np\n\ndef flip(im):\n    return np.flipud(im)\n\ndemo = gr.Interface(\n    flip, \n    gr.Image(source=\"webcam\", streaming=True), \n    \"image\",\n    live=True\n)\ndemo.launch()\n\n
\n", "tags": [], "spaces": [], "url": "/guides/reactive-interfaces/", "contributor": null}, {"name": "more-on-examples", "category": "building-interfaces", "pretty_category": "Building Interfaces", "guide_index": 3, "absolute_index": 5, "pretty_name": "More On Examples", "content": "# More on Examples\n\nThis guide covers what more you can do with Examples: Loading examples from a directory, providing partial examples, and caching. If Examples is new to you, check out the intro in the [Key Features](/guides/key-features/#example-inputs) guide. \n\n## Providing Examples\n\nAs covered in the [Key Features](/guides/key-features/#example-inputs) guide, adding examples to an Interface is as easy as providing a list of lists to the `examples`\nkeyword argument. \nEach sublist is a data sample, where each element corresponds to an input of the prediction function.\nThe inputs must be ordered in the same order as the prediction function expects them.\n\nIf your interface only has one input component, then you can provide your examples as a regular list instead of a list of lists.\n\n### Loading Examples from a Directory\n\nYou can also specify a path to a directory containing your examples. If your Interface takes only a single file-type input, e.g. an image classifier, you can simply pass a directory filepath to the `examples=` argument, and the `Interface` will load the images in the directory as examples. \nIn the case of multiple inputs, this directory must\ncontain a log.csv file with the example values.\nIn the context of the calculator demo, we can set `examples='/demo/calculator/examples'` and in that directory we include the following `log.csv` file:\n```csv\nnum,operation,num2\n5,\"add\",3\n4,\"divide\",2\n5,\"multiply\",3\n```\n\nThis can be helpful when browsing flagged data. Simply point to the flagged directory and the `Interface` will load the examples from the flagged data.\n\n### Providing Partial Examples\n\nSometimes your app has many input components, but you would only like to provide examples for a subset of them. In order to exclude some inputs from the examples, pass `None` for all data samples corresponding to those particular components.\n\n## Caching examples\n\nYou may wish to provide some cached examples of your model for users to quickly try out, in case your model takes a while to run normally.\nIf `cache_examples=True`, the `Interface` will run all of your examples through your app and save the outputs when you call the `launch()` method. This data will be saved in a directory called `gradio_cached_examples`. \n\nWhenever a user clicks on an example, the output will automatically be populated in the app now, using data from this cached directory instead of actually running the function. This is useful so users can quickly try out your model without adding any load! \n\nKeep in mind once the cache is generated, it will not be updated in future launches. If the examples or function logic change, delete the cache folder to clear the cache and rebuild it with another `launch()`.\n\n", "html": "

More on Examples

\n\n

This guide covers what more you can do with Examples: Loading examples from a directory, providing partial examples, and caching. If Examples is new to you, check out the intro in the Key Features guide.

\n\n

Providing Examples

\n\n

As covered in the Key Features guide, adding examples to an Interface is as easy as providing a list of lists to the examples\nkeyword argument. \nEach sublist is a data sample, where each element corresponds to an input of the prediction function.\nThe inputs must be ordered in the same order as the prediction function expects them.

\n\n

If your interface only has one input component, then you can provide your examples as a regular list instead of a list of lists.
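For instance, a sketch of the multi-input case might look like this, using a hypothetical calculator function; each sublist supplies one value per input, in the order the function expects them:

```python
import gradio as gr

def calculator(num1, operation, num2):
    if operation == "add":
        return num1 + num2
    elif operation == "subtract":
        return num1 - num2
    elif operation == "multiply":
        return num1 * num2
    elif operation == "divide":
        return num1 / num2

demo = gr.Interface(
    calculator,
    [gr.Number(), gr.Radio(["add", "subtract", "multiply", "divide"]), gr.Number()],
    "number",
    # one sublist per example, ordered like the function's arguments
    examples=[[5, "add", 3], [4, "divide", 2], [-4, "multiply", 2.5]],
)
demo.launch()
```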

\n\n

Loading Examples from a Directory

\n\n

You can also specify a path to a directory containing your examples. If your Interface takes only a single file-type input, e.g. an image classifier, you can simply pass a directory filepath to the examples= argument, and the Interface will load the images in the directory as examples. \nIn the case of multiple inputs, this directory must\ncontain a log.csv file with the example values.\nIn the context of the calculator demo, we can set examples='/demo/calculator/examples' and in that directory we include the following log.csv file:

\n\n
num,operation,num2\n5,\"add\",3\n4,\"divide\",2\n5,\"multiply\",3\n
\n\n

This can be helpful when browsing flagged data. Simply point to the flagged directory and the Interface will load the examples from the flagged data.
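A minimal sketch of the single-input case, assuming a placeholder classify_image function and an examples/ folder of images (point it at flagged/ instead to browse flagged data):

```python
import gradio as gr

def classify_image(img):
    # placeholder prediction; a real model would go here
    return {"cat": 0.7, "dog": 0.3}

demo = gr.Interface(
    fn=classify_image,
    inputs=gr.Image(),
    outputs=gr.Label(),
    examples="examples/",  # a directory of images, or "flagged/" for flagged data
)
demo.launch()
```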

\n\n

Providing Partial Examples

\n\n

Sometimes your app has many input components, but you would only like to provide examples for a subset of them. In order to exclude some inputs from the examples, pass None for all data samples corresponding to those particular components.
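As a sketch (the describe function and its inputs are hypothetical), suppose an app takes a prompt, a step count, and an optional reference image, but we only want example values for the first two inputs; passing None leaves the image column blank:

```python
import gradio as gr

def describe(prompt, steps, reference):
    # placeholder function; a real model would go here
    return f"{prompt} ({int(steps)} steps)"

demo = gr.Interface(
    describe,
    [gr.Textbox(label="Prompt"), gr.Slider(1, 100, label="Steps"), gr.Image(label="Reference")],
    "text",
    # None excludes the reference image from each example
    examples=[
        ["an astronaut riding a horse", 20, None],
        ["a watercolor of a fox", 50, None],
    ],
)
demo.launch()
```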

\n\n

Caching examples

\n\n

You may wish to provide some cached examples of your model for users to quickly try out, in case your model takes a while to run normally.\nIf cache_examples=True, the Interface will run all of your examples through your app and save the outputs when you call the launch() method. This data will be saved in a directory called gradio_cached_examples.

\n\n

Whenever a user clicks on an example, the output will now automatically be populated in the app, using data from this cached directory instead of actually running the function. This is useful so users can quickly try out your model without adding any load!

\n\n

Keep in mind that once the cache is generated, it will not be updated in future launches. If the examples or function logic change, delete the cache folder to clear the cache and rebuild it with another launch().
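Putting this together, a hedged sketch of enabling caching for a deliberately slow placeholder function:

```python
import time
import gradio as gr

def slow_predict(text):
    time.sleep(5)  # stand-in for an expensive model call
    return text.upper()

demo = gr.Interface(
    slow_predict,
    gr.Textbox(),
    gr.Textbox(),
    examples=[["hello world"], ["gradio caches these outputs"]],
    cache_examples=True,  # outputs are computed once at launch() and stored in gradio_cached_examples/
)
demo.launch()
```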

\n", "tags": [], "spaces": [], "url": "/guides/more-on-examples/", "contributor": null}, {"name": "advanced-interface-features", "category": "building-interfaces", "pretty_category": "Building Interfaces", "guide_index": 4, "absolute_index": 6, "pretty_name": "Advanced Interface Features", "content": "# Advanced Interface Features\n\nThere's more to cover on the [Interface](https://gradio.app/docs#interface) class. This guide covers all the advanced features: Using [Interpretation](https://gradio.app/docs#interpretation), custom styling, loading from the [Hugging Face Hub](https://hf.co), and using [Parallel](https://gradio.app/docs#parallel) and [Series](https://gradio.app/docs#series). \n\n## Interpreting your Predictions\n\nMost models are black boxes such that the internal logic of the function is hidden from the end user. To encourage transparency, we've made it very easy to add interpretation to your model by simply setting the `interpretation` keyword in the `Interface` class to `default`. This allows your users to understand what parts of the input are responsible for the output. Take a look at the simple interface below which shows an image classifier that also includes interpretation:\n\n```python\nimport requests\nimport tensorflow as tf\n\nimport gradio as gr\n\ninception_net = tf.keras.applications.MobileNetV2() # load the model\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\n\ndef classify_image(inp):\n inp = inp.reshape((-1, 224, 224, 3))\n inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)\n prediction = inception_net.predict(inp).flatten()\n return {labels[i]: float(prediction[i]) for i in range(1000)}\n\n\nimage = gr.Image(shape=(224, 224))\nlabel = gr.Label(num_top_classes=3)\n\ndemo = gr.Interface(\n fn=classify_image, inputs=image, outputs=label, interpretation=\"default\"\n)\n\ndemo.launch()\n\n```\n\n\nIn addition to `default`, Gradio also includes [Shapley-based interpretation](https://christophm.github.io/interpretable-ml-book/shap.html), which provides more accurate interpretations, albeit usually with a slower runtime. To use this, simply set the `interpretation` parameter to `\"shap\"` (note: also make sure the python package `shap` is installed). Optionally, you can modify the `num_shap` parameter, which controls the tradeoff between accuracy and runtime (increasing this value generally increases accuracy). Here is an example:\n\n```python\ngr.Interface(fn=classify_image,\n inputs=image, \n outputs=label, \n interpretation=\"shap\", \n num_shap=5).launch()\n```\n\nThis will work for any function, even if internally, the model is a complex neural network or some other black box. If you use Gradio's `default` or `shap` interpretation, the output component must be a `Label`. All common input components are supported. 
Here is an example with text input.\n\n```python\nimport gradio as gr\n\nmale_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n\n\ndef gender_of_sentence(sentence):\n male_count = len([word for word in sentence.split() if word.lower() in male_words])\n female_count = len(\n [word for word in sentence.split() if word.lower() in female_words]\n )\n total = max(male_count + female_count, 1)\n return {\"male\": male_count / total, \"female\": female_count / total}\n\n\ndemo = gr.Interface(\n fn=gender_of_sentence,\n inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n outputs=\"label\",\n interpretation=\"default\",\n)\n\ndemo.launch()\n\n```\n\nSo what is happening under the hood? With these interpretation methods, Gradio runs the prediction multiple times with modified versions of the input. Based on the results, you'll see that the interface automatically highlights the parts of the text (or image, etc.) that contributed increased the likelihood of the class as red. The intensity of color corresponds to the importance of that part of the input. The parts that decrease the class confidence are highlighted blue.\n\nYou can also write your own interpretation function. The demo below adds custom interpretation to the previous demo. This function will take the same inputs as the main wrapped function. The output of this interpretation function will be used to highlight the input of each input component - therefore the function must return a list where the number of elements corresponds to the number of input components. To see the format for interpretation for each input component, check the Docs.\n\n```python\nimport re\n\nimport gradio as gr\n\nmale_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n\n\ndef gender_of_sentence(sentence):\n male_count = len([word for word in sentence.split() if word.lower() in male_words])\n female_count = len(\n [word for word in sentence.split() if word.lower() in female_words]\n )\n total = max(male_count + female_count, 1)\n return {\"male\": male_count / total, \"female\": female_count / total}\n\n\n# Number of arguments to interpretation function must\n# match number of inputs to prediction function\ndef interpret_gender(sentence):\n result = gender_of_sentence(sentence)\n is_male = result[\"male\"] > result[\"female\"]\n interpretation = []\n for word in re.split(\"( )\", sentence):\n score = 0\n token = word.lower()\n if (is_male and token in male_words) or (not is_male and token in female_words):\n score = 1\n elif (is_male and token in female_words) or (\n not is_male and token in male_words\n ):\n score = -1\n interpretation.append((word, score))\n # Output must be a list of lists containing the same number of elements as inputs\n # Each element corresponds to the interpretation scores for the given input\n return [interpretation]\n\n\ndemo = gr.Interface(\n fn=gender_of_sentence,\n inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n outputs=\"label\",\n interpretation=interpret_gender,\n)\n\ndemo.launch()\n\n```\n\nLearn more about Interpretation in the [docs](https://gradio.app/docs#interpretation). 
\n\n## Custom Styling\n\nIf you'd like to have more fine-grained control over any aspect of your demo, you can also write your own css or pass in a filepath to a css file, with the `css` parameter of the `Interface` class.\n\n```python\ngr.Interface(..., css=\"body {background-color: red}\")\n```\n\nIf you'd like to reference external files in your css, preface the file path (which can be a relative or absolute path) with `\"file=\"`, for example:\n\n```python\ngr.Interface(..., css=\"body {background-image: url('file=clouds.jpg')}\")\n```\n\n**Warning**: Custom CSS is *not* guaranteed to work across Gradio versions as the Gradio HTML DOM may change. We recommend using custom CSS sparingly and instead using [Themes](/guides/theming-guide/) whenever possible. \n\n## Loading Hugging Face Models and Spaces\n\nGradio integrates nicely with the [Hugging Face Hub](https://hf.co), allowing you to load models and Spaces with just one line of code. To use this, simply use the `load()` method in the `Interface` class. So:\n\n- To load any model from the Hugging Face Hub and create an interface around it, you pass `\"model/\"` or `\"huggingface/\"` followed by the model name, like these examples:\n\n```python\ngr.Interface.load(\"huggingface/gpt2\").launch();\n```\n\n```python\ngr.Interface.load(\"huggingface/EleutherAI/gpt-j-6B\", \n inputs=gr.Textbox(lines=5, label=\"Input Text\") # customizes the input component\n).launch()\n```\n\n- To load any Space from the Hugging Face Hub and recreate it locally (so that you can customize the inputs and outputs for example), you pass `\"spaces/\"` followed by the model name:\n\n```python\ngr.Interface.load(\"spaces/eugenesiow/remove-bg\", \n inputs=\"webcam\", \n title=\"Remove your webcam background!\").launch()\n```\n\nOne of the great things about loading Hugging Face models or spaces using Gradio is that you can then immediately use the resulting `Interface` object just like function in your Python code (this works for every type of model/space: text, images, audio, video, and even multimodal models):\n\n```python\nio = gr.Interface.load(\"models/EleutherAI/gpt-neo-2.7B\")\nio(\"It was the best of times\") # outputs model completion\n```\n\n## Putting Interfaces in Parallel and Series\n\nGradio also lets you mix interfaces very easily using the `gradio.Parallel` and `gradio.Series` classes. `Parallel` lets you put two similar models (if they have the same input type) in parallel to compare model predictions:\n\n```python\ngenerator1 = gr.Interface.load(\"huggingface/gpt2\")\ngenerator2 = gr.Interface.load(\"huggingface/EleutherAI/gpt-neo-2.7B\")\ngenerator3 = gr.Interface.load(\"huggingface/EleutherAI/gpt-j-6B\")\n\ngr.Parallel(generator1, generator2, generator3).launch()\n```\n\n`Series` lets you put models and spaces in series, piping the output of one model into the input of the next model. \n\n```python\ngenerator = gr.Interface.load(\"huggingface/gpt2\")\ntranslator = gr.Interface.load(\"huggingface/t5-small\")\n\ngr.Series(generator, translator).launch() \n# this demo generates text, then translates it to German, and outputs the final result.\n```\n\nAnd of course, you can also mix `Parallel` and `Series` together whenever that makes sense!\n\nLearn more about Parallel and Series in the [docs](https://gradio.app/docs#parallel). ", "html": "

Advanced Interface Features

\n\n

There's more to cover on the Interface class. This guide covers all the advanced features: Using Interpretation, custom styling, loading from the Hugging Face Hub, and using Parallel and Series.

\n\n

Interpreting your Predictions

\n\n

Most models are black boxes such that the internal logic of the function is hidden from the end user. To encourage transparency, we've made it very easy to add interpretation to your model by simply setting the interpretation keyword in the Interface class to default. This allows your users to understand what parts of the input are responsible for the output. Take a look at the simple interface below which shows an image classifier that also includes interpretation:

\n\n
import requests\nimport tensorflow as tf\n\nimport gradio as gr\n\ninception_net = tf.keras.applications.MobileNetV2()  # load the model\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\n\ndef classify_image(inp):\n    inp = inp.reshape((-1, 224, 224, 3))\n    inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)\n    prediction = inception_net.predict(inp).flatten()\n    return {labels[i]: float(prediction[i]) for i in range(1000)}\n\n\nimage = gr.Image(shape=(224, 224))\nlabel = gr.Label(num_top_classes=3)\n\ndemo = gr.Interface(\n    fn=classify_image, inputs=image, outputs=label, interpretation=\"default\"\n)\n\ndemo.launch()\n\n
\n\n

In addition to default, Gradio also includes Shapley-based interpretation, which provides more accurate interpretations, albeit usually with a slower runtime. To use this, simply set the interpretation parameter to \"shap\" (note: also make sure the python package shap is installed). Optionally, you can modify the num_shap parameter, which controls the tradeoff between accuracy and runtime (increasing this value generally increases accuracy). Here is an example:

\n\n
gr.Interface(fn=classify_image,\n            inputs=image, \n            outputs=label, \n            interpretation=\"shap\", \n            num_shap=5).launch()\n
\n\n

This will work for any function, even if internally, the model is a complex neural network or some other black box. If you use Gradio's default or shap interpretation, the output component must be a Label. All common input components are supported. Here is an example with text input.

\n\n
import gradio as gr\n\nmale_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n\n\ndef gender_of_sentence(sentence):\n    male_count = len([word for word in sentence.split() if word.lower() in male_words])\n    female_count = len(\n        [word for word in sentence.split() if word.lower() in female_words]\n    )\n    total = max(male_count + female_count, 1)\n    return {\"male\": male_count / total, \"female\": female_count / total}\n\n\ndemo = gr.Interface(\n    fn=gender_of_sentence,\n    inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n    outputs=\"label\",\n    interpretation=\"default\",\n)\n\ndemo.launch()\n\n
\n\n

So what is happening under the hood? With these interpretation methods, Gradio runs the prediction multiple times with modified versions of the input. Based on the results, you'll see that the interface automatically highlights in red the parts of the text (or image, etc.) that increased the likelihood of the predicted class. The intensity of the color corresponds to the importance of that part of the input. The parts that decrease the class confidence are highlighted in blue.

\n\n

You can also write your own interpretation function. The demo below adds custom interpretation to the previous demo. This function will take the same inputs as the main wrapped function. The output of this interpretation function will be used to highlight the input of each input component - therefore the function must return a list where the number of elements corresponds to the number of input components. To see the format for interpretation for each input component, check the Docs.

\n\n
import re\n\nimport gradio as gr\n\nmale_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n\n\ndef gender_of_sentence(sentence):\n    male_count = len([word for word in sentence.split() if word.lower() in male_words])\n    female_count = len(\n        [word for word in sentence.split() if word.lower() in female_words]\n    )\n    total = max(male_count + female_count, 1)\n    return {\"male\": male_count / total, \"female\": female_count / total}\n\n\n# Number of arguments to interpretation function must\n# match number of inputs to prediction function\ndef interpret_gender(sentence):\n    result = gender_of_sentence(sentence)\n    is_male = result[\"male\"] > result[\"female\"]\n    interpretation = []\n    for word in re.split(\"( )\", sentence):\n        score = 0\n        token = word.lower()\n        if (is_male and token in male_words) or (not is_male and token in female_words):\n            score = 1\n        elif (is_male and token in female_words) or (\n            not is_male and token in male_words\n        ):\n            score = -1\n        interpretation.append((word, score))\n    # Output must be a list of lists containing the same number of elements as inputs\n    # Each element corresponds to the interpretation scores for the given input\n    return [interpretation]\n\n\ndemo = gr.Interface(\n    fn=gender_of_sentence,\n    inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n    outputs=\"label\",\n    interpretation=interpret_gender,\n)\n\ndemo.launch()\n\n
\n\n

Learn more about Interpretation in the docs.

\n\n

Custom Styling

\n\n

If you'd like to have more fine-grained control over any aspect of your demo, you can also write your own CSS, or pass in a filepath to a CSS file, using the css parameter of the Interface class.

\n\n
gr.Interface(..., css=\"body {background-color: red}\")\n
\n\n

If you'd like to reference external files in your css, preface the file path (which can be a relative or absolute path) with \"file=\", for example:

\n\n
gr.Interface(..., css=\"body {background-image: url('file=clouds.jpg')}\")\n
\n\n

Warning: Custom CSS is not guaranteed to work across Gradio versions as the Gradio HTML DOM may change. We recommend using custom CSS sparingly and instead using Themes whenever possible.

\n\n

Loading Hugging Face Models and Spaces

\n\n

Gradio integrates nicely with the Hugging Face Hub, allowing you to load models and Spaces with just one line of code. To do this, simply use the load() method of the Interface class:

\n\n
    \n
  • To load any model from the Hugging Face Hub and create an interface around it, you pass \"model/\" or \"huggingface/\" followed by the model name, like these examples:
  • \n
\n\n
gr.Interface.load(\"huggingface/gpt2\").launch();\n
\n\n
gr.Interface.load(\"huggingface/EleutherAI/gpt-j-6B\", \n    inputs=gr.Textbox(lines=5, label=\"Input Text\")  # customizes the input component\n).launch()\n
\n\n
    \n
  • To load any Space from the Hugging Face Hub and recreate it locally (so that you can customize the inputs and outputs, for example), you pass \"spaces/\" followed by the name of the Space:
  • \n
\n\n
gr.Interface.load(\"spaces/eugenesiow/remove-bg\", \n                  inputs=\"webcam\", \n                  title=\"Remove your webcam background!\").launch()\n
\n\n

One of the great things about loading Hugging Face models or spaces using Gradio is that you can then immediately use the resulting Interface object just like a function in your Python code (this works for every type of model/space: text, images, audio, video, and even multimodal models):

\n\n
io = gr.Interface.load(\"models/EleutherAI/gpt-neo-2.7B\")\nio(\"It was the best of times\")  # outputs model completion\n
\n\n

Putting Interfaces in Parallel and Series

\n\n

Gradio also lets you mix interfaces very easily using the gradio.Parallel and gradio.Series classes. Parallel lets you put two or more similar models (if they have the same input type) in parallel to compare their predictions:

\n\n
generator1 = gr.Interface.load(\"huggingface/gpt2\")\ngenerator2 = gr.Interface.load(\"huggingface/EleutherAI/gpt-neo-2.7B\")\ngenerator3 = gr.Interface.load(\"huggingface/EleutherAI/gpt-j-6B\")\n\ngr.Parallel(generator1, generator2, generator3).launch()\n
\n\n

Series lets you put models and spaces in series, piping the output of one model into the input of the next model.

\n\n
generator = gr.Interface.load(\"huggingface/gpt2\")\ntranslator = gr.Interface.load(\"huggingface/t5-small\")\n\ngr.Series(generator, translator).launch()  \n# this demo generates text, then translates it to German, and outputs the final result.\n
\n\n

And of course, you can also mix Parallel and Series together whenever that makes sense!

\n\n

Learn more about Parallel and Series in the docs.

\n", "tags": [], "spaces": [], "url": "/guides/advanced-interface-features/", "contributor": null}, {"name": "four-kinds-of-interfaces", "category": "building-interfaces", "pretty_category": "Building Interfaces", "guide_index": 5, "absolute_index": 7, "pretty_name": "Four Kinds Of Interfaces", "content": "# The 4 Kinds of Gradio Interfaces\n\nSo far, we've always assumed that in order to build an Gradio demo, you need both inputs and outputs. But this isn't always the case for machine learning demos: for example, *unconditional image generation models* don't take any input but produce an image as the output.\n\nIt turns out that the `gradio.Interface` class can actually handle 4 different kinds of demos:\n\n1. **Standard demos**: which have both separate inputs and outputs (e.g. an image classifier or speech-to-text model)\n2. **Output-only demos**: which don't take any input but produce on output (e.g. an unconditional image generation model)\n3. **Input-only demos**: which don't produce any output but do take in some sort of input (e.g. a demo that saves images that you upload to a persistent external database)\n4. **Unified demos**: which have both input and output components, but the input and output components *are the same*. This means that the output produced overrides the input (e.g. a text autocomplete model)\n\nDepending on the kind of demo, the user interface (UI) looks slightly different:\n\n![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/gradio-guides/interfaces4.png)\n\n\nLet's see how to build each kind of demo using the `Interface` class, along with examples:\n\n\n## Standard demos\n\nTo create a demo that has both the input and the output components, you simply need to set the values of the `inputs` and `outputs` parameter in `Interface()`. Here's an example demo of a simple image filter:\n\n```python\nimport numpy as np\nimport gradio as gr\n\ndef sepia(input_img):\n sepia_filter = np.array([\n [0.393, 0.769, 0.189], \n [0.349, 0.686, 0.168], \n [0.272, 0.534, 0.131]\n ])\n sepia_img = input_img.dot(sepia_filter.T)\n sepia_img /= sepia_img.max()\n return sepia_img\n\ndemo = gr.Interface(sepia, gr.Image(shape=(200, 200)), \"image\")\ndemo.launch()\n\n```\n\n\n\n## Output-only demos\n\nWhat about demos that only contain outputs? In order to build such a demo, you simply set the value of the `inputs` parameter in `Interface()` to `None`. Here's an example demo of a mock image generation model:\n\n```python\nimport time\n\nimport gradio as gr\n\n\ndef fake_gan():\n time.sleep(1)\n images = [\n \"https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n \"https://images.unsplash.com/photo-1554151228-14d9def656e4?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=386&q=80\",\n \"https://images.unsplash.com/photo-1542909168-82c3e7fdca5c?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxzZWFyY2h8MXx8aHVtYW4lMjBmYWNlfGVufDB8fDB8fA%3D%3D&w=1000&q=80\",\n ]\n return images\n\n\ndemo = gr.Interface(\n fn=fake_gan,\n inputs=None,\n outputs=gr.Gallery(label=\"Generated Images\").style(grid=[2]),\n title=\"FD-GAN\",\n description=\"This is a fake demo of a GAN. In reality, the images are randomly chosen from Unsplash.\",\n)\n\ndemo.launch()\n\n```\n\n\n## Input-only demos\n\nSimilarly, to create a demo that only contains inputs, set the value of `outputs` parameter in `Interface()` to be `None`. 
Here's an example demo that saves any uploaded image to disk:\n\n```python\nimport random\nimport string\nimport gradio as gr \n\ndef save_image_random_name(image):\n random_string = ''.join(random.choices(string.ascii_letters, k=20)) + '.png'\n image.save(random_string)\n print(f\"Saved image to {random_string}!\")\n\ndemo = gr.Interface(\n fn=save_image_random_name, \n inputs=gr.Image(type=\"pil\"), \n outputs=None,\n)\ndemo.launch()\n```\n\n\n## Unified demos\n\nA demo that has a single component as both the input and the output. It can simply be created by setting the values of the `inputs` and `outputs` parameter as the same component. Here's an example demo of a text generation model:\n\n```python\nimport gradio as gr\nfrom transformers import pipeline\n\ngenerator = pipeline('text-generation', model = 'gpt2')\n\ndef generate_text(text_prompt):\n response = generator(text_prompt, max_length = 30, num_return_sequences=5)\n return response[0]['generated_text']\n\ntextbox = gr.Textbox()\n\ndemo = gr.Interface(generate_text, textbox, textbox)\n\ndemo.launch()\n\n```\n\n", "html": "

The 4 Kinds of Gradio Interfaces

\n\n

So far, we've always assumed that in order to build a Gradio demo, you need both inputs and outputs. But this isn't always the case for machine learning demos: for example, unconditional image generation models don't take any input but produce an image as the output.

\n\n

It turns out that the gradio.Interface class can actually handle 4 different kinds of demos:

\n\n
    \n
  1. Standard demos: which have both separate inputs and outputs (e.g. an image classifier or speech-to-text model)
  2. \n
  3. Output-only demos: which don't take any input but produce an output (e.g. an unconditional image generation model)
  4. \n
  5. Input-only demos: which don't produce any output but do take in some sort of input (e.g. a demo that saves images that you upload to a persistent external database)
  6. \n
  7. Unified demos: which have both input and output components, but the input and output components are the same. This means that the output produced overrides the input (e.g. a text autocomplete model)
  8. \n
\n\n

Depending on the kind of demo, the user interface (UI) looks slightly different:

\n\n

\"\"

\n\n

Let's see how to build each kind of demo using the Interface class, along with examples:

\n\n

Standard demos

\n\n

To create a demo that has both the input and the output components, you simply need to set the values of the inputs and outputs parameters in Interface(). Here's an example demo of a simple image filter:

\n\n
import numpy as np\nimport gradio as gr\n\ndef sepia(input_img):\n    sepia_filter = np.array([\n        [0.393, 0.769, 0.189], \n        [0.349, 0.686, 0.168], \n        [0.272, 0.534, 0.131]\n    ])\n    sepia_img = input_img.dot(sepia_filter.T)\n    sepia_img /= sepia_img.max()\n    return sepia_img\n\ndemo = gr.Interface(sepia, gr.Image(shape=(200, 200)), \"image\")\ndemo.launch()\n\n
\n\n

\n\n

Output-only demos

\n\n

What about demos that only contain outputs? In order to build such a demo, you simply set the value of the inputs parameter in Interface() to None. Here's an example demo of a mock image generation model:

\n\n
import time\n\nimport gradio as gr\n\n\ndef fake_gan():\n    time.sleep(1)\n    images = [\n            \"https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n            \"https://images.unsplash.com/photo-1554151228-14d9def656e4?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=386&q=80\",\n            \"https://images.unsplash.com/photo-1542909168-82c3e7fdca5c?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxzZWFyY2h8MXx8aHVtYW4lMjBmYWNlfGVufDB8fDB8fA%3D%3D&w=1000&q=80\",\n    ]\n    return images\n\n\ndemo = gr.Interface(\n    fn=fake_gan,\n    inputs=None,\n    outputs=gr.Gallery(label=\"Generated Images\").style(grid=[2]),\n    title=\"FD-GAN\",\n    description=\"This is a fake demo of a GAN. In reality, the images are randomly chosen from Unsplash.\",\n)\n\ndemo.launch()\n\n
\n\n

\n\n

Input-only demos

\n\n

Similarly, to create a demo that only contains inputs, set the value of the outputs parameter in Interface() to None. Here's an example demo that saves any uploaded image to disk:

\n\n
import random\nimport string\nimport gradio as gr \n\ndef save_image_random_name(image):\n    random_string = ''.join(random.choices(string.ascii_letters, k=20)) + '.png'\n    image.save(random_string)\n    print(f\"Saved image to {random_string}!\")\n\ndemo = gr.Interface(\n    fn=save_image_random_name, \n    inputs=gr.Image(type=\"pil\"), \n    outputs=None,\n)\ndemo.launch()\n
\n\n

\n\n

Unified demos

\n\n

A unified demo has a single component serving as both the input and the output. It can be created simply by setting the inputs and outputs parameters to the same component. Here's an example demo of a text generation model:

\n\n
import gradio as gr\nfrom transformers import pipeline\n\ngenerator = pipeline('text-generation', model = 'gpt2')\n\ndef generate_text(text_prompt):\n  response = generator(text_prompt, max_length = 30, num_return_sequences=5)\n  return response[0]['generated_text']\n\ntextbox = gr.Textbox()\n\ndemo = gr.Interface(generate_text, textbox, textbox)\n\ndemo.launch()\n\n
\n\n

\n", "tags": [], "spaces": [], "url": "/guides/four-kinds-of-interfaces/", "contributor": null}]}, {"category": "Building With Blocks", "guides": [{"name": "blocks-and-event-listeners", "category": "building-with-blocks", "pretty_category": "Building With Blocks", "guide_index": 1, "absolute_index": 8, "pretty_name": "Blocks And Event Listeners", "content": "# Blocks and Event Listeners\n\nWe took a quick look at Blocks in the [Quickstart](https://gradio.app/guides/quickstart/#blocks-more-flexibility-and-control). Let's dive deeper. This guide will cover the how Blocks are structured, event listeners and their types, running events continuously, updating configurations, and using dictionaries vs lists. \n\n## Blocks Structure\n\nTake a look at the demo below.\n\n```python\nimport gradio as gr\n\ndef greet(name):\n return \"Hello \" + name + \"!\"\n\nwith gr.Blocks() as demo:\n name = gr.Textbox(label=\"Name\")\n output = gr.Textbox(label=\"Output Box\")\n greet_btn = gr.Button(\"Greet\")\n greet_btn.click(fn=greet, inputs=name, outputs=output, api_name=\"greet\")\n \n\ndemo.launch()\n```\n\n\n- First, note the `with gr.Blocks() as demo:` clause. The Blocks app code will be contained within this clause.\n- Next come the Components. These are the same Components used in `Interface`. However, instead of being passed to some constructor, Components are automatically added to the Blocks as they are created within the `with` clause.\n- Finally, the `click()` event listener. Event listeners define the data flow within the app. In the example above, the listener ties the two Textboxes together. The Textbox `name` acts as the input and Textbox `output` acts as the output to the `greet` method. This dataflow is triggered when the Button `greet_btn` is clicked. Like an Interface, an event listener can take multiple inputs or outputs.\n\n## Event Listeners and Interactivity\n\nIn the example above, you'll notice that you are able to edit Textbox `name`, but not Textbox `output`. This is because any Component that acts as an input to an event listener is made interactive. However, since Textbox `output` acts only as an output, Gradio determines that it should not be made interactive. You can override the default behavior and directly configure the interactivity of a Component with the boolean `interactive` keyword argument. \n\n```python\noutput = gr.Textbox(label=\"Output\", interactive=True)\n```\n\n_Note_: What happens if a Gradio component is neither an input nor an output? If a component is constructed with a default value, then it is presumed to be displaying content and is rendered non-interactive. Otherwise, it is rendered interactive. Again, this behavior can be overridden by specifying a value for the `interactive` argument.\n\n## Types of Event Listeners\n\nTake a look at the demo below:\n\n```python\nimport gradio as gr\n\ndef welcome(name):\n return f\"Welcome to Gradio, {name}!\"\n\nwith gr.Blocks() as demo:\n gr.Markdown(\n \"\"\"\n # Hello World!\n Start typing below to see the output.\n \"\"\")\n inp = gr.Textbox(placeholder=\"What is your name?\")\n out = gr.Textbox()\n inp.change(welcome, inp, out)\n\ndemo.launch()\n```\n\n\nInstead of being triggered by a click, the `welcome` function is triggered by typing in the Textbox `inp`. This is due to the `change()` event listener. Different Components support different event listeners. For example, the `Video` Component supports a `play()` event listener, triggered when a user presses play. 
See the [Docs](http://gradio.app/docs#components) for the event listeners for each Component.\n\n## Multiple Data Flows\n\nA Blocks app is not limited to a single data flow the way Interfaces are. Take a look at the demo below:\n\n```python\nimport gradio as gr\n\ndef increase(num):\n return num + 1\n\nwith gr.Blocks() as demo:\n a = gr.Number(label=\"a\")\n b = gr.Number(label=\"b\")\n btoa = gr.Button(\"a > b\")\n atob = gr.Button(\"b > a\")\n atob.click(increase, a, b)\n btoa.click(increase, b, a)\n\ndemo.launch()\n```\n\n\nNote that `num1` can act as input to `num2`, and also vice-versa! As your apps get more complex, you will have many data flows connecting various Components. \n\nHere's an example of a \"multi-step\" demo, where the output of one model (a speech-to-text model) gets fed into the next model (a sentiment classifier).\n\n```python\nfrom transformers import pipeline\n\nimport gradio as gr\n\nasr = pipeline(\"automatic-speech-recognition\", \"facebook/wav2vec2-base-960h\")\nclassifier = pipeline(\"text-classification\")\n\n\ndef speech_to_text(speech):\n text = asr(speech)[\"text\"]\n return text\n\n\ndef text_to_sentiment(text):\n return classifier(text)[0][\"label\"]\n\n\ndemo = gr.Blocks()\n\nwith demo:\n audio_file = gr.Audio(type=\"filepath\")\n text = gr.Textbox()\n label = gr.Label()\n\n b1 = gr.Button(\"Recognize Speech\")\n b2 = gr.Button(\"Classify Sentiment\")\n\n b1.click(speech_to_text, inputs=audio_file, outputs=text)\n b2.click(text_to_sentiment, inputs=text, outputs=label)\n\ndemo.launch()\n\n```\n\n\n## Function Input List vs Dict\n\nThe event listeners you've seen so far have a single input component. If you'd like to have multiple input components pass data to the function, you have two options on how the function can accept input component values:\n\n1. as a list of arguments, or\n2. as a single dictionary of values, keyed by the component\n\nLet's see an example of each:\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n a = gr.Number(label=\"a\")\n b = gr.Number(label=\"b\")\n with gr.Row():\n add_btn = gr.Button(\"Add\")\n sub_btn = gr.Button(\"Subtract\")\n c = gr.Number(label=\"sum\")\n\n def add(num1, num2):\n return num1 + num2\n add_btn.click(add, inputs=[a, b], outputs=c)\n\n def sub(data):\n return data[a] - data[b]\n sub_btn.click(sub, inputs={a, b}, outputs=c)\n\n\ndemo.launch()\n```\n\nBoth `add()` and `sub()` take `a` and `b` as inputs. However, the syntax is different between these listeners. \n\n1. To the `add_btn` listener, we pass the inputs as a list. The function `add()` takes each of these inputs as arguments. The value of `a` maps to the argument `num1`, and the value of `b` maps to the argument `num2`.\n2. To the `sub_btn` listener, we pass the inputs as a set (note the curly brackets!). The function `sub()` takes a single dictionary argument `data`, where the keys are the input components, and the values are the values of those components.\n\nIt is a matter of preference which syntax you prefer! For functions with many input components, option 2 may be easier to manage.\n\n\n\n## Function Return List vs Dict\n\nSimilarly, you may return values for multiple output components either as:\n\n1. a list of values, or\n2. 
a dictionary keyed by the component\n\nLet's first see an example of (1), where we set the values of two output components by returning two values:\n\n```python\nwith gr.Blocks() as demo:\n food_box = gr.Number(value=10, label=\"Food Count\")\n status_box = gr.Textbox()\n def eat(food):\n if food > 0:\n return food - 1, \"full\"\n else:\n return 0, \"hungry\"\n gr.Button(\"EAT\").click(\n fn=eat, \n inputs=food_box,\n outputs=[food_box, status_box]\n )\n```\n\nAbove, each return statement returns two values corresponding to `food_box` and `status_box`, respectively.\n\nInstead of returning a list of values corresponding to each output component in order, you can also return a dictionary, with the key corresponding to the output component and the value as the new value. This also allows you to skip updating some output components. \n\n```python\nwith gr.Blocks() as demo:\n food_box = gr.Number(value=10, label=\"Food Count\")\n status_box = gr.Textbox()\n def eat(food):\n if food > 0:\n return {food_box: food - 1, status_box: \"full\"}\n else:\n return {status_box: \"hungry\"}\n gr.Button(\"EAT\").click(\n fn=eat, \n inputs=food_box,\n outputs=[food_box, status_box]\n )\n```\n\nNotice how when there is no food, we only update the `status_box` element. We skipped updating the `food_box` component.\n\nDictionary returns are helpful when an event listener affects many components on return, or conditionally affects outputs and not others.\n\nKeep in mind that with dictionary returns, we still need to specify the possible outputs in the event listener.\n\n## Updating Component Configurations\n\nThe return value of an event listener function is usually the updated value of the corresponding output Component. Sometimes we want to update the configuration of the Component as well, such as the visibility. In this case, we return a `gr.update()` object instead of just the update Component value.\n\n```python\nimport gradio as gr\n\ndef change_textbox(choice):\n if choice == \"short\":\n return gr.update(lines=2, visible=True, value=\"Short story: \")\n elif choice == \"long\":\n return gr.update(lines=8, visible=True, value=\"Long story...\")\n else:\n return gr.update(visible=False)\n\nwith gr.Blocks() as demo:\n radio = gr.Radio(\n [\"short\", \"long\", \"none\"], label=\"Essay Length to Write?\"\n )\n text = gr.Textbox(lines=2, interactive=True)\n radio.change(fn=change_textbox, inputs=radio, outputs=text)\n\ndemo.launch()\n```\n\n\nSee how we can configure the Textbox itself through the `gr.update()` method. The `value=` argument can still be used to update the value along with Component configuration.\n\n## Running Events Consecutively\n\nYou can also run events consecutively by using the `then` method of an event listener. This will run an event after the previous event has finished running. This is useful for running events that update components in multiple steps. 
\n\nFor example, in the chatbot example below, we first update the chatbot with the user message immediately, and then update the chatbot with the computer response after a simulated delay.\n\n```python\nimport gradio as gr\nimport random\nimport time\n\nwith gr.Blocks() as demo:\n chatbot = gr.Chatbot()\n msg = gr.Textbox()\n clear = gr.Button(\"Clear\")\n\n def user(user_message, history):\n return \"\", history + [[user_message, None]]\n\n def bot(history):\n bot_message = random.choice([\"How are you?\", \"I love you\", \"I'm very hungry\"])\n time.sleep(2)\n history[-1][1] = bot_message\n return history\n\n msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(\n bot, chatbot, chatbot\n )\n clear.click(lambda: None, None, chatbot, queue=False)\n \ndemo.queue()\ndemo.launch()\n\n```\n\n\nThe `.then()` method of an event listener executes the subsequent event regardless of whether the previous event raised any errors. If you'd like to only run subsequent events if the previous event executed successfully, use the `.success()` method, which takes the same arguments as `.then()`.\n\n## Running Events Continuously\n\nYou can run events on a fixed schedule using the `every` parameter of the event listener. This will run the event\n`every` number of seconds while the client connection is open. If the connection is closed, the event will stop running after the following iteration.\nNote that this does not take into account the runtime of the event itself. So a function\nwith a 1 second runtime running with `every=5`, would actually run every 6 seconds.\n\nHere is an example of a sine curve that updates every second!\n\n```python\nimport math\nimport gradio as gr\nimport plotly.express as px\nimport numpy as np\n\n\nplot_end = 2 * math.pi\n\n\ndef get_plot(period=1):\n global plot_end\n x = np.arange(plot_end - 2 * math.pi, plot_end, 0.02)\n y = np.sin(2*math.pi*period * x)\n fig = px.line(x=x, y=y)\n plot_end += 2 * math.pi\n if plot_end > 1000:\n plot_end = 2 * math.pi\n return fig\n\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n gr.Markdown(\"Change the value of the slider to automatically update the plot\")\n period = gr.Slider(label=\"Period of plot\", value=1, minimum=0, maximum=10, step=1)\n plot = gr.Plot(label=\"Plot (updates every half second)\")\n\n dep = demo.load(get_plot, None, plot, every=1)\n period.change(get_plot, period, plot, every=1, cancels=[dep])\n\n\nif __name__ == \"__main__\":\n demo.queue().launch()\n\n```\n\n\n## Gathering Event Data\n\nYou can gather specific data about an event by adding the associated event data class as a type hint to an argument in the event listener function. \n\nFor example, event data for `.select()` can be type hinted by a `gradio.SelectData` argument. This event is triggered when a user selects some part of the triggering component, and the event data includes information about what the user specifically selected. If a user selected a specific word in a `Textbox`, a specific image in a `Gallery`, or a specific cell in a `DataFrame`, the event data argument would contain information about the specific selection.\n\nIn the 2 player tic-tac-toe demo below, a user can select a cell in the `DataFrame` to make a move. The event data argument contains information about the specific cell that was selected. We can first check to see if the cell is empty, and then update the cell with the user's move. 
\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n turn = gr.Textbox(\"X\", interactive=False, label=\"Turn\")\n board = gr.Dataframe(value=[[\"\", \"\", \"\"]] * 3, interactive=False, type=\"array\")\n\n def place(board, turn, evt: gr.SelectData):\n if evt.value:\n return board, turn\n board[evt.index[0]][evt.index[1]] = turn\n turn = \"O\" if turn == \"X\" else \"X\"\n return board, turn\n\n board.select(place, [board, turn], [board, turn])\n\ndemo.launch()\n```\n", "html": "

Blocks and Event Listeners

\n\n

We took a quick look at Blocks in the Quickstart. Let's dive deeper. This guide will cover how Blocks are structured, event listeners and their types, running events continuously, updating configurations, and using dictionaries vs lists.

\n\n

Blocks Structure

\n\n

Take a look at the demo below.

\n\n
import gradio as gr\n\ndef greet(name):\n    return \"Hello \" + name + \"!\"\n\nwith gr.Blocks() as demo:\n    name = gr.Textbox(label=\"Name\")\n    output = gr.Textbox(label=\"Output Box\")\n    greet_btn = gr.Button(\"Greet\")\n    greet_btn.click(fn=greet, inputs=name, outputs=output, api_name=\"greet\")\n\n\ndemo.launch()\n
\n\n

\n\n
    \n
  • First, note the with gr.Blocks() as demo: clause. The Blocks app code will be contained within this clause.
  • \n
  • Next come the Components. These are the same Components used in Interface. However, instead of being passed to some constructor, Components are automatically added to the Blocks as they are created within the with clause.
  • \n
  • Finally, the click() event listener. Event listeners define the data flow within the app. In the example above, the listener ties the two Textboxes together. The Textbox name acts as the input and Textbox output acts as the output to the greet method. This dataflow is triggered when the Button greet_btn is clicked. Like an Interface, an event listener can take multiple inputs or outputs.
  • \n
\n\n

Event Listeners and Interactivity

\n\n

In the example above, you'll notice that you are able to edit Textbox name, but not Textbox output. This is because any Component that acts as an input to an event listener is made interactive. However, since Textbox output acts only as an output, Gradio determines that it should not be made interactive. You can override the default behavior and directly configure the interactivity of a Component with the boolean interactive keyword argument.

\n\n
output = gr.Textbox(label=\"Output\", interactive=True)\n
\n\n

Note: What happens if a Gradio component is neither an input nor an output? If a component is constructed with a default value, then it is presumed to be displaying content and is rendered non-interactive. Otherwise, it is rendered interactive. Again, this behavior can be overridden by specifying a value for the interactive argument.
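A small sketch of that default behavior (both Textboxes below are display-only components, i.e. neither inputs nor outputs of any listener; the second one is forced interactive):

```python
import gradio as gr

with gr.Blocks() as demo:
    # constructed with a value and not wired to any event, so rendered non-interactive
    gr.Textbox(value="Read-only by default", label="Display")
    # interactive=True overrides that presumption
    gr.Textbox(value="Editable anyway", label="Editable", interactive=True)

demo.launch()
```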

\n\n

Types of Event Listeners

\n\n

Take a look at the demo below:

\n\n
import gradio as gr\n\ndef welcome(name):\n    return f\"Welcome to Gradio, {name}!\"\n\nwith gr.Blocks() as demo:\n    gr.Markdown(\n    \"\"\"\n    # Hello World!\n    Start typing below to see the output.\n    \"\"\")\n    inp = gr.Textbox(placeholder=\"What is your name?\")\n    out = gr.Textbox()\n    inp.change(welcome, inp, out)\n\ndemo.launch()\n
\n\n

\n\n

Instead of being triggered by a click, the welcome function is triggered by typing in the Textbox inp. This is due to the change() event listener. Different Components support different event listeners. For example, the Video Component supports a play() event listener, triggered when a user presses play. See the Docs for the event listeners for each Component.

\n\n

Multiple Data Flows

\n\n

A Blocks app is not limited to a single data flow the way Interfaces are. Take a look at the demo below:

\n\n
import gradio as gr\n\ndef increase(num):\n    return num + 1\n\nwith gr.Blocks() as demo:\n    a = gr.Number(label=\"a\")\n    b = gr.Number(label=\"b\")\n    btoa = gr.Button(\"a > b\")\n    atob = gr.Button(\"b > a\")\n    atob.click(increase, a, b)\n    btoa.click(increase, b, a)\n\ndemo.launch()\n
\n\n

\n\n

Note that a can act as an input to b, and also vice-versa! As your apps get more complex, you will have many data flows connecting various Components.

\n\n

Here's an example of a \"multi-step\" demo, where the output of one model (a speech-to-text model) gets fed into the next model (a sentiment classifier).

\n\n
from transformers import pipeline\n\nimport gradio as gr\n\nasr = pipeline(\"automatic-speech-recognition\", \"facebook/wav2vec2-base-960h\")\nclassifier = pipeline(\"text-classification\")\n\n\ndef speech_to_text(speech):\n    text = asr(speech)[\"text\"]\n    return text\n\n\ndef text_to_sentiment(text):\n    return classifier(text)[0][\"label\"]\n\n\ndemo = gr.Blocks()\n\nwith demo:\n    audio_file = gr.Audio(type=\"filepath\")\n    text = gr.Textbox()\n    label = gr.Label()\n\n    b1 = gr.Button(\"Recognize Speech\")\n    b2 = gr.Button(\"Classify Sentiment\")\n\n    b1.click(speech_to_text, inputs=audio_file, outputs=text)\n    b2.click(text_to_sentiment, inputs=text, outputs=label)\n\ndemo.launch()\n\n
\n\n

\n\n

Function Input List vs Dict

\n\n

The event listeners you've seen so far have a single input component. If you'd like to have multiple input components pass data to the function, you have two options on how the function can accept input component values:

\n\n
    \n
  1. as a list of arguments, or
  2. \n
  3. as a single dictionary of values, keyed by the component
  4. \n
\n\n

Let's see an example of each:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    a = gr.Number(label=\"a\")\n    b = gr.Number(label=\"b\")\n    with gr.Row():\n        add_btn = gr.Button(\"Add\")\n        sub_btn = gr.Button(\"Subtract\")\n    c = gr.Number(label=\"sum\")\n\n    def add(num1, num2):\n        return num1 + num2\n    add_btn.click(add, inputs=[a, b], outputs=c)\n\n    def sub(data):\n        return data[a] - data[b]\n    sub_btn.click(sub, inputs={a, b}, outputs=c)\n\n\ndemo.launch()\n
\n\n

Both add() and sub() take a and b as inputs. However, the syntax is different between these listeners.

\n\n
    \n
  1. To the add_btn listener, we pass the inputs as a list. The function add() takes each of these inputs as arguments. The value of a maps to the argument num1, and the value of b maps to the argument num2.
  2. \n
  3. To the sub_btn listener, we pass the inputs as a set (note the curly brackets!). The function sub() takes a single dictionary argument data, where the keys are the input components, and the values are the values of those components.
  4. \n
\n\n

Which syntax you use is a matter of preference! For functions with many input components, option 2 may be easier to manage.

\n\n

\n\n

Function Return List vs Dict

\n\n

Similarly, you may return values for multiple output components either as:

\n\n
    \n
  1. a list of values, or
  2. \n
  3. a dictionary keyed by the component
  4. \n
\n\n

Let's first see an example of (1), where we set the values of two output components by returning two values:

\n\n
with gr.Blocks() as demo:\n    food_box = gr.Number(value=10, label=\"Food Count\")\n    status_box = gr.Textbox()\n    def eat(food):\n        if food > 0:\n            return food - 1, \"full\"\n        else:\n            return 0, \"hungry\"\n    gr.Button(\"EAT\").click(\n        fn=eat, \n        inputs=food_box,\n        outputs=[food_box, status_box]\n    )\n
\n\n

Above, each return statement returns two values corresponding to food_box and status_box, respectively.

\n\n

Instead of returning a list of values corresponding to each output component in order, you can also return a dictionary, with the key corresponding to the output component and the value as the new value. This also allows you to skip updating some output components.

\n\n
with gr.Blocks() as demo:\n    food_box = gr.Number(value=10, label=\"Food Count\")\n    status_box = gr.Textbox()\n    def eat(food):\n        if food > 0:\n            return {food_box: food - 1, status_box: \"full\"}\n        else:\n            return {status_box: \"hungry\"}\n    gr.Button(\"EAT\").click(\n        fn=eat, \n        inputs=food_box,\n        outputs=[food_box, status_box]\n    )\n
\n\n

Notice how when there is no food, we only update the status_box element. We skipped updating the food_box component.

\n\n

Dictionary returns are helpful when an event listener affects many components on return, or conditionally affects some outputs and not others.

\n\n

Keep in mind that with dictionary returns, we still need to specify the possible outputs in the event listener.

\n\n

Updating Component Configurations

\n\n

The return value of an event listener function is usually the updated value of the corresponding output Component. Sometimes we want to update the configuration of the Component as well, such as the visibility. In this case, we return a gr.update() object instead of just the updated Component value.

\n\n
import gradio as gr\n\ndef change_textbox(choice):\n    if choice == \"short\":\n        return gr.update(lines=2, visible=True, value=\"Short story: \")\n    elif choice == \"long\":\n        return gr.update(lines=8, visible=True, value=\"Long story...\")\n    else:\n        return gr.update(visible=False)\n\nwith gr.Blocks() as demo:\n    radio = gr.Radio(\n        [\"short\", \"long\", \"none\"], label=\"Essay Length to Write?\"\n    )\n    text = gr.Textbox(lines=2, interactive=True)\n    radio.change(fn=change_textbox, inputs=radio, outputs=text)\n\ndemo.launch()\n
\n\n

\n\n

See how we can configure the Textbox itself through the gr.update() method. The value= argument can still be used to update the value along with Component configuration.

\n\n

Running Events Consecutively

\n\n

You can also run events consecutively by using the then method of an event listener. This will run an event after the previous event has finished running. This is useful for running events that update components in multiple steps.

\n\n

For example, in the chatbot example below, we first update the chatbot with the user message immediately, and then update the chatbot with the computer response after a simulated delay.

\n\n
import gradio as gr\nimport random\nimport time\n\nwith gr.Blocks() as demo:\n    chatbot = gr.Chatbot()\n    msg = gr.Textbox()\n    clear = gr.Button(\"Clear\")\n\n    def user(user_message, history):\n        return \"\", history + [[user_message, None]]\n\n    def bot(history):\n        bot_message = random.choice([\"How are you?\", \"I love you\", \"I'm very hungry\"])\n        time.sleep(2)\n        history[-1][1] = bot_message\n        return history\n\n    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(\n        bot, chatbot, chatbot\n    )\n    clear.click(lambda: None, None, chatbot, queue=False)\n\ndemo.queue()\ndemo.launch()\n\n
\n\n

\n\n

The .then() method of an event listener executes the subsequent event regardless of whether the previous event raised any errors. If you'd like to only run subsequent events if the previous event executed successfully, use the .success() method, which takes the same arguments as .then().
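For instance, the chatbot example above could swap .then() for .success() so the bot reply only runs when the user-message step completes without an error (a sketch of the change, not part of the original demo):

```python
msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).success(
    bot, chatbot, chatbot
)
```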

\n\n

Running Events Continuously

\n\n

You can run events on a fixed schedule using the every parameter of the event listener. This will run the event\nevery number of seconds while the client connection is open. If the connection is closed, the event will stop running after the following iteration.\nNote that this does not take into account the runtime of the event itself. So a function\nwith a 1-second runtime running with every=5 would actually run every 6 seconds.

\n\n

Here is an example of a sine curve that updates every second!

\n\n
import math\nimport gradio as gr\nimport plotly.express as px\nimport numpy as np\n\n\nplot_end = 2 * math.pi\n\n\ndef get_plot(period=1):\n    global plot_end\n    x = np.arange(plot_end - 2 * math.pi, plot_end, 0.02)\n    y = np.sin(2*math.pi*period * x)\n    fig = px.line(x=x, y=y)\n    plot_end += 2 * math.pi\n    if plot_end > 1000:\n        plot_end = 2 * math.pi\n    return fig\n\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        with gr.Column():\n            gr.Markdown(\"Change the value of the slider to automatically update the plot\")\n            period = gr.Slider(label=\"Period of plot\", value=1, minimum=0, maximum=10, step=1)\n            plot = gr.Plot(label=\"Plot (updates every half second)\")\n\n    dep = demo.load(get_plot, None, plot, every=1)\n    period.change(get_plot, period, plot, every=1, cancels=[dep])\n\n\nif __name__ == \"__main__\":\n    demo.queue().launch()\n\n
\n\n

\n\n

Gathering Event Data

\n\n

You can gather specific data about an event by adding the associated event data class as a type hint to an argument in the event listener function.

\n\n

For example, event data for .select() can be type hinted by a gradio.SelectData argument. This event is triggered when a user selects some part of the triggering component, and the event data includes information about what the user specifically selected. If a user selected a specific word in a Textbox, a specific image in a Gallery, or a specific cell in a DataFrame, the event data argument would contain information about the specific selection.

\n\n

In the 2 player tic-tac-toe demo below, a user can select a cell in the DataFrame to make a move. The event data argument contains information about the specific cell that was selected. We can first check to see if the cell is empty, and then update the cell with the user's move.

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    turn = gr.Textbox(\"X\", interactive=False, label=\"Turn\")\n    board = gr.Dataframe(value=[[\"\", \"\", \"\"]] * 3, interactive=False, type=\"array\")\n\n    def place(board, turn, evt: gr.SelectData):\n        if evt.value:\n            return board, turn\n        board[evt.index[0]][evt.index[1]] = turn\n        turn = \"O\" if turn == \"X\" else \"X\"\n        return board, turn\n\n    board.select(place, [board, turn], [board, turn])\n\ndemo.launch()\n
\n\n

\n", "tags": [], "spaces": [], "url": "/guides/blocks-and-event-listeners/", "contributor": null}, {"name": "controlling-layout", "category": "building-with-blocks", "pretty_category": "Building With Blocks", "guide_index": 2, "absolute_index": 9, "pretty_name": "Controlling Layout", "content": "# Controlling Layout\n\nBy default, Components in Blocks are arranged vertically. Let's take a look at how we can rearrange Components. Under the hood, this layout structure uses the [flexbox model of web development](https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Flexible_Box_Layout/Basic_Concepts_of_Flexbox).\n\n## Rows\n\nElements within a `with gr.Row` clause will all be displayed horizontally. For example, to display two Buttons side by side:\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row():\n btn1 = gr.Button(\"Button 1\")\n btn2 = gr.Button(\"Button 2\")\n```\n\nTo make every element in a Row have the same height, use the `equal_height` argument of the `style` method.\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row().style(equal_height=True):\n textbox = gr.Textbox()\n btn2 = gr.Button(\"Button 2\")\n```\n\nThe widths of elements in a Row can be controlled via a combination of `scale` and `min_width` arguments that are present in every Component.\n\n- `scale` is an integer that defines how an element will take up space in a Row. If scale is set to `0`, and element will not expand to take up space. If scale is set to `1` or greater, the element well expand. Multiple elements in a row will expand proportional to their scale. Below, `btn1` will expand twice as much as `btn2`, while `btn0` will not expand at all:\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row():\n btn0 = gr.Button(\"Button 0\", scale=0)\n btn1 = gr.Button(\"Button 1\", scale=1)\n btn2 = gr.Button(\"Button 2\", scale=2)\n```\n\n- `min_width` will set the minimum width the element will take. The Row will wrap if there isn't sufficient space to satisfy all `min_width` values.\n\nLearn more about Rows in the [docs](https://gradio.app/docs/#row).\n\n## Columns and Nesting\n\nComponents within a Column will be placed vertically atop each other. Since the vertical layout is the default layout for Blocks apps anyway, to be useful, Columns are usually nested within Rows. For example:\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n with gr.Row():\n text1 = gr.Textbox(label=\"t1\")\n slider2 = gr.Textbox(label=\"s2\")\n drop3 = gr.Dropdown([\"a\", \"b\", \"c\"], label=\"d3\")\n with gr.Row():\n with gr.Column(scale=1, min_width=600):\n text1 = gr.Textbox(label=\"prompt 1\")\n text2 = gr.Textbox(label=\"prompt 2\")\n inbtw = gr.Button(\"Between\")\n text4 = gr.Textbox(label=\"prompt 1\")\n text5 = gr.Textbox(label=\"prompt 2\")\n with gr.Column(scale=2, min_width=600):\n img1 = gr.Image(\"images/cheetah.jpg\")\n btn = gr.Button(\"Go\").style(full_width=True)\n\ndemo.launch()\n```\n\n\nSee how the first column has two Textboxes arranged vertically. The second column has an Image and Button arranged vertically. Notice how the relative widths of the two columns is set by the `scale` parameter. The column with twice the `scale` value takes up twice the width.\n\nLearn more about Columns in the [docs](https://gradio.app/docs/#column).\n\n## Tabs and Accordions\n\nYou can also create Tabs using the `with gr.Tab('tab_name'):` clause. Any component created inside of a `with gr.Tab('tab_name'):` context appears in that tab. 
Consecutive Tab clauses are grouped together so that a single tab can be selected at one time, and only the components within that Tab's context are shown.\n\nFor example:\n\n```python\nimport numpy as np\nimport gradio as gr\n\n\ndef flip_text(x):\n return x[::-1]\n\n\ndef flip_image(x):\n return np.fliplr(x)\n\n\nwith gr.Blocks() as demo:\n gr.Markdown(\"Flip text or image files using this demo.\")\n with gr.Tab(\"Flip Text\"):\n text_input = gr.Textbox()\n text_output = gr.Textbox()\n text_button = gr.Button(\"Flip\")\n with gr.Tab(\"Flip Image\"):\n with gr.Row():\n image_input = gr.Image()\n image_output = gr.Image()\n image_button = gr.Button(\"Flip\")\n\n with gr.Accordion(\"Open for More!\"):\n gr.Markdown(\"Look at me...\")\n\n text_button.click(flip_text, inputs=text_input, outputs=text_output)\n image_button.click(flip_image, inputs=image_input, outputs=image_output)\n\ndemo.launch()\n\n```\n\n\nAlso note the `gr.Accordion('label')` in this example. The Accordion is a layout that can be toggled open or closed. Like `Tabs`, it is a layout element that can selectively hide or show content. Any components that are defined inside of a `with gr.Accordion('label'):` will be hidden or shown when the accordion's toggle icon is clicked.\n\nLearn more about [Tabs](https://gradio.app/docs/#tab) and [Accordions](https://gradio.app/docs/#accordion) in the docs.\n\n## Visibility\n\nBoth Components and Layout elements have a `visible` argument that can set initially and also updated using `gr.update()`. Setting `gr.update(visible=...)` on a Column can be used to show or hide a set of Components.\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n error_box = gr.Textbox(label=\"Error\", visible=False)\n\n name_box = gr.Textbox(label=\"Name\")\n age_box = gr.Number(label=\"Age\", minimum=0, maximum=100)\n symptoms_box = gr.CheckboxGroup([\"Cough\", \"Fever\", \"Runny Nose\"])\n submit_btn = gr.Button(\"Submit\")\n\n with gr.Column(visible=False) as output_col:\n diagnosis_box = gr.Textbox(label=\"Diagnosis\")\n patient_summary_box = gr.Textbox(label=\"Patient Summary\")\n\n def submit(name, age, symptoms):\n if len(name) == 0:\n return {error_box: gr.update(value=\"Enter name\", visible=True)}\n return {\n output_col: gr.update(visible=True),\n diagnosis_box: \"covid\" if \"Cough\" in symptoms else \"flu\",\n patient_summary_box: f\"{name}, {age} y/o\",\n }\n\n submit_btn.click(\n submit,\n [name_box, age_box, symptoms_box],\n [error_box, diagnosis_box, patient_summary_box, output_col],\n )\n\ndemo.launch()\n\n```\n\n\n## Variable Number of Outputs\n\nBy adjusting the visibility of components in a dynamic way, it is possible to create\ndemos with Gradio that support a *variable numbers of outputs*. Here's a very simple example\nwhere the number of output textboxes is controlled by an input slider:\n\n```python\nimport gradio as gr\n\nmax_textboxes = 10\n\ndef variable_outputs(k):\n k = int(k)\n return [gr.Textbox.update(visible=True)]*k + [gr.Textbox.update(visible=False)]*(max_textboxes-k)\n\nwith gr.Blocks() as demo:\n s = gr.Slider(1, max_textboxes, value=max_textboxes, step=1, label=\"How many textboxes to show:\")\n textboxes = []\n for i in range(max_textboxes):\n t = gr.Textbox(f\"Textbox {i}\")\n textboxes.append(t)\n\n s.change(variable_outputs, s, textboxes)\n\nif __name__ == \"__main__\":\n demo.launch()\n\n```\n\n\n## Defining and Rendering Components Separately\n\nIn some cases, you might want to define components before you actually render them in your UI. 
For instance, you might want to show an examples section using `gr.Examples` above the corresponding `gr.Textbox` input. Since `gr.Examples` requires as a parameter the input component object, you will need to first define the input component, but then render it later, after you have defined the `gr.Examples` object.\n\nThe solution to this is to define the `gr.Textbox` outside of the `gr.Blocks()` scope and use the component's `.render()` method wherever you'd like it placed in the UI.\n\nHere's a full code example:\n\n```python\ninput_textbox = gr.Textbox()\n\nwith gr.Blocks() as demo:\n gr.Examples([\"hello\", \"bonjour\", \"merhaba\"], input_textbox)\n input_textbox.render()\n```\n", "html": "

Controlling Layout

\n\n

By default, Components in Blocks are arranged vertically. Let's take a look at how we can rearrange Components. Under the hood, this layout structure uses the flexbox model of web development.

\n\n

Rows

\n\n

Elements within a with gr.Row clause will all be displayed horizontally. For example, to display two Buttons side by side:

\n\n
with gr.Blocks() as demo:\n    with gr.Row():\n        btn1 = gr.Button(\"Button 1\")\n        btn2 = gr.Button(\"Button 2\")\n
\n\n

To make every element in a Row have the same height, use the equal_height argument of the style method.

\n\n
with gr.Blocks() as demo:\n    with gr.Row().style(equal_height=True):\n        textbox = gr.Textbox()\n        btn2 = gr.Button(\"Button 2\")\n
\n\n

The widths of elements in a Row can be controlled via a combination of scale and min_width arguments that are present in every Component.

\n\n
    \n
  • scale is an integer that defines how an element will take up space in a Row. If scale is set to 0, the element will not expand to take up space. If scale is set to 1 or greater, the element will expand. Multiple elements in a row will expand proportionally to their scale. Below, btn2 will expand twice as much as btn1, while btn0 will not expand at all:
  • \n
\n\n
with gr.Blocks() as demo:\n    with gr.Row():\n        btn0 = gr.Button(\"Button 0\", scale=0)\n        btn1 = gr.Button(\"Button 1\", scale=1)\n        btn2 = gr.Button(\"Button 2\", scale=2)\n
\n\n
    \n
  • min_width will set the minimum width the element will take. The Row will wrap if there isn't sufficient space to satisfy all min_width values (see the sketch below this list).
  • \n
\n\n
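
To illustrate min_width, here is a minimal sketch (the pixel values are just examples) where each Button requests at least 300 pixels, so the Row wraps on narrower screens:

\n\n
with gr.Blocks() as demo:\n    with gr.Row():\n        # each button requests at least 300px; the Row wraps if there isn't enough space\n        btn1 = gr.Button(\"Button 1\", min_width=300)\n        btn2 = gr.Button(\"Button 2\", min_width=300)\n
\n\n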

Learn more about Rows in the docs.

\n\n

Columns and Nesting

\n\n

Components within a Column will be placed vertically atop each other. Since the vertical layout is the default layout for Blocks apps anyway, to be useful, Columns are usually nested within Rows. For example:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        text1 = gr.Textbox(label=\"t1\")\n        slider2 = gr.Textbox(label=\"s2\")\n        drop3 = gr.Dropdown([\"a\", \"b\", \"c\"], label=\"d3\")\n    with gr.Row():\n        with gr.Column(scale=1, min_width=600):\n            text1 = gr.Textbox(label=\"prompt 1\")\n            text2 = gr.Textbox(label=\"prompt 2\")\n            inbtw = gr.Button(\"Between\")\n            text4 = gr.Textbox(label=\"prompt 1\")\n            text5 = gr.Textbox(label=\"prompt 2\")\n        with gr.Column(scale=2, min_width=600):\n            img1 = gr.Image(\"images/cheetah.jpg\")\n            btn = gr.Button(\"Go\").style(full_width=True)\n\ndemo.launch()\n
\n\n

\n\n

See how the first column has several Textboxes and a Button arranged vertically. The second column has an Image and a Button arranged vertically. Notice how the relative widths of the two columns are set by the scale parameter. The column with twice the scale value takes up twice the width.

\n\n

Learn more about Columns in the docs.

\n\n

Tabs and Accordions

\n\n

You can also create Tabs using the with gr.Tab('tab_name'): clause. Any component created inside of a with gr.Tab('tab_name'): context appears in that tab. Consecutive Tab clauses are grouped together so that a single tab can be selected at one time, and only the components within that Tab's context are shown.

\n\n

For example:

\n\n
import numpy as np\nimport gradio as gr\n\n\ndef flip_text(x):\n    return x[::-1]\n\n\ndef flip_image(x):\n    return np.fliplr(x)\n\n\nwith gr.Blocks() as demo:\n    gr.Markdown(\"Flip text or image files using this demo.\")\n    with gr.Tab(\"Flip Text\"):\n        text_input = gr.Textbox()\n        text_output = gr.Textbox()\n        text_button = gr.Button(\"Flip\")\n    with gr.Tab(\"Flip Image\"):\n        with gr.Row():\n            image_input = gr.Image()\n            image_output = gr.Image()\n        image_button = gr.Button(\"Flip\")\n\n    with gr.Accordion(\"Open for More!\"):\n        gr.Markdown(\"Look at me...\")\n\n    text_button.click(flip_text, inputs=text_input, outputs=text_output)\n    image_button.click(flip_image, inputs=image_input, outputs=image_output)\n\ndemo.launch()\n\n
\n\n

\n\n

Also note the gr.Accordion('label') in this example. The Accordion is a layout that can be toggled open or closed. Like Tabs, it is a layout element that can selectively hide or show content. Any components that are defined inside of a with gr.Accordion('label'): will be hidden or shown when the accordion's toggle icon is clicked.

\n\n

Learn more about Tabs and Accordions in the docs.

\n\n

Visibility

\n\n

Both Components and Layout elements have a visible argument that can be set initially and also updated using gr.update(). Setting gr.update(visible=...) on a Column can be used to show or hide a set of Components.

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    error_box = gr.Textbox(label=\"Error\", visible=False)\n\n    name_box = gr.Textbox(label=\"Name\")\n    age_box = gr.Number(label=\"Age\", minimum=0, maximum=100)\n    symptoms_box = gr.CheckboxGroup([\"Cough\", \"Fever\", \"Runny Nose\"])\n    submit_btn = gr.Button(\"Submit\")\n\n    with gr.Column(visible=False) as output_col:\n        diagnosis_box = gr.Textbox(label=\"Diagnosis\")\n        patient_summary_box = gr.Textbox(label=\"Patient Summary\")\n\n    def submit(name, age, symptoms):\n        if len(name) == 0:\n            return {error_box: gr.update(value=\"Enter name\", visible=True)}\n        return {\n            output_col: gr.update(visible=True),\n            diagnosis_box: \"covid\" if \"Cough\" in symptoms else \"flu\",\n            patient_summary_box: f\"{name}, {age} y/o\",\n        }\n\n    submit_btn.click(\n        submit,\n        [name_box, age_box, symptoms_box],\n        [error_box, diagnosis_box, patient_summary_box, output_col],\n    )\n\ndemo.launch()\n\n
\n\n

\n\n

Variable Number of Outputs

\n\n

By adjusting the visibility of components in a dynamic way, it is possible to create\ndemos with Gradio that support a variable number of outputs. Here's a very simple example\nwhere the number of output textboxes is controlled by an input slider:

\n\n
import gradio as gr\n\nmax_textboxes = 10\n\ndef variable_outputs(k):\n    k = int(k)\n    return [gr.Textbox.update(visible=True)]*k + [gr.Textbox.update(visible=False)]*(max_textboxes-k)\n\nwith gr.Blocks() as demo:\n    s = gr.Slider(1, max_textboxes, value=max_textboxes, step=1, label=\"How many textboxes to show:\")\n    textboxes = []\n    for i in range(max_textboxes):\n        t = gr.Textbox(f\"Textbox {i}\")\n        textboxes.append(t)\n\n    s.change(variable_outputs, s, textboxes)\n\nif __name__ == \"__main__\":\n   demo.launch()\n\n
\n\n

\n\n

Defining and Rendering Components Separately

\n\n

In some cases, you might want to define components before you actually render them in your UI. For instance, you might want to show an examples section using gr.Examples above the corresponding gr.Textbox input. Since gr.Examples requires the input component object as a parameter, you will need to define the input component first, but render it later, after you have defined the gr.Examples object.

\n\n

The solution to this is to define the gr.Textbox outside of the gr.Blocks() scope and use the component's .render() method wherever you'd like it placed in the UI.

\n\n

Here's a full code example:

\n\n
input_textbox = gr.Textbox()\n\nwith gr.Blocks() as demo:\n    gr.Examples([\"hello\", \"bonjour\", \"merhaba\"], input_textbox)\n    input_textbox.render()\n
\n", "tags": [], "spaces": [], "url": "/guides/controlling-layout/", "contributor": null}, {"name": "state-in-blocks", "category": "building-with-blocks", "pretty_category": "Building With Blocks", "guide_index": 3, "absolute_index": 10, "pretty_name": "State In Blocks", "content": "# State in Blocks\n\nWe covered [State in Interfaces](https://gradio.app/interface-state), this guide takes a look at state in Blocks, which works mostly the same. \n\n## Global State\n\nGlobal state in Blocks works the same as in Interface. Any variable created outside a function call is a reference shared between all users.\n\n## Session State\n\nGradio supports session **state**, where data persists across multiple submits within a page session, in Blocks apps as well. To reiterate, session data is *not* shared between different users of your model. To store data in a session state, you need to do three things:\n\n1. Create a `gr.State()` object. If there is a default value to this stateful object, pass that into the constructor.\n2. In the event listener, put the `State` object as an input and output.\n3. In the event listener function, add the variable to the input parameters and the return value.\n\nLet's take a look at a game of hangman. \n\n```python\nimport gradio as gr\n\nsecret_word = \"gradio\"\n\nwith gr.Blocks() as demo: \n used_letters_var = gr.State([])\n with gr.Row() as row:\n with gr.Column():\n input_letter = gr.Textbox(label=\"Enter letter\")\n btn = gr.Button(\"Guess Letter\")\n with gr.Column():\n hangman = gr.Textbox(\n label=\"Hangman\",\n value=\"_\"*len(secret_word)\n )\n used_letters_box = gr.Textbox(label=\"Used Letters\")\n\n def guess_letter(letter, used_letters):\n used_letters.append(letter)\n answer = \"\".join([\n (letter if letter in used_letters else \"_\")\n for letter in secret_word\n ])\n return {\n used_letters_var: used_letters,\n used_letters_box: \", \".join(used_letters),\n hangman: answer\n }\n btn.click(\n guess_letter, \n [input_letter, used_letters_var],\n [used_letters_var, used_letters_box, hangman]\n )\ndemo.launch()\n```\n\n\nLet's see how we do each of the 3 steps listed above in this game:\n\n1. We store the used letters in `used_letters_var`. In the constructor of `State`, we set the initial value of this to `[]`, an empty list. \n2. In `btn.click()`, we have a reference to `used_letters_var` in both the inputs and outputs.\n3. In `guess_letter`, we pass the value of this `State` to `used_letters`, and then return an updated value of this `State` in the return statement.\n\nWith more complex apps, you will likely have many State variables storing session state in a single Blocks app.\n\nLearn more about `State` in the [docs](https://gradio.app/docs#state).\n\n\n\n", "html": "

State in Blocks

\n\n

We covered State in Interfaces; this guide takes a look at state in Blocks, which works mostly the same.

\n\n

Global State

\n\n

Global state in Blocks works the same as in Interface. Any variable created outside a function call is a reference shared between all users.

\n\n
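
As a minimal sketch (not one of the guide's own demos), a counter defined outside any function is shared by every user of the app:

\n\n
import gradio as gr\n\nvisit_count = 0  # defined outside any function, so it is shared across all users\n\ndef greet(name):\n    global visit_count\n    visit_count += 1\n    return f\"Hello {name}! This app has been called {visit_count} times.\"\n\nwith gr.Blocks() as demo:\n    name = gr.Textbox(label=\"Name\")\n    greeting = gr.Textbox(label=\"Greeting\")\n    name.submit(greet, name, greeting)\n\ndemo.launch()\n
\n\n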

Session State

\n\n

Gradio supports session state, where data persists across multiple submits within a page session, in Blocks apps as well. To reiterate, session data is not shared between different users of your model. To store data in a session state, you need to do three things:

\n\n
    \n
  1. Create a gr.State() object. If there is a default value to this stateful object, pass that into the constructor.
  2. \n
  3. In the event listener, put the State object as an input and output.
  4. \n
  5. In the event listener function, add the variable to the input parameters and the return value.
  6. \n
\n\n

Let's take a look at a game of hangman.

\n\n
import gradio as gr\n\nsecret_word = \"gradio\"\n\nwith gr.Blocks() as demo:    \n    used_letters_var = gr.State([])\n    with gr.Row() as row:\n        with gr.Column():\n            input_letter = gr.Textbox(label=\"Enter letter\")\n            btn = gr.Button(\"Guess Letter\")\n        with gr.Column():\n            hangman = gr.Textbox(\n                label=\"Hangman\",\n                value=\"_\"*len(secret_word)\n            )\n            used_letters_box = gr.Textbox(label=\"Used Letters\")\n\n    def guess_letter(letter, used_letters):\n        used_letters.append(letter)\n        answer = \"\".join([\n            (letter if letter in used_letters else \"_\")\n            for letter in secret_word\n        ])\n        return {\n            used_letters_var: used_letters,\n            used_letters_box: \", \".join(used_letters),\n            hangman: answer\n        }\n    btn.click(\n        guess_letter, \n        [input_letter, used_letters_var],\n        [used_letters_var, used_letters_box, hangman]\n        )\ndemo.launch()\n
\n\n

\n\n

Let's see how we do each of the 3 steps listed above in this game:

\n\n
    \n
  1. We store the used letters in used_letters_var. In the constructor of State, we set the initial value of this to [], an empty list.
  2. \n
  3. In btn.click(), we have a reference to used_letters_var in both the inputs and outputs.
  4. \n
  5. In guess_letter, we pass the value of this State to used_letters, and then return an updated value of this State in the return statement.
  6. \n
\n\n

With more complex apps, you will likely have many State variables storing session state in a single Blocks app.

\n\n

Learn more about State in the docs.

\n", "tags": [], "spaces": [], "url": "/guides/state-in-blocks/", "contributor": null}, {"name": "custom-CSS-and-JS", "category": "building-with-blocks", "pretty_category": "Building With Blocks", "guide_index": 4, "absolute_index": 11, "pretty_name": "Custom CSS And JS", "content": "# Custom JS and CSS\n\nThis guide covers how to style Blocks with more flexibility, as well as adding Javascript code to event listeners. \n\n**Warning**: The use of query selectors in custom JS and CSS is *not* guaranteed to work across Gradio versions as the Gradio HTML DOM may change. We recommend using query selectors sparingly.\n\n## Custom CSS\n\nGradio themes are the easiest way to customize the look and feel of your app. You can choose from a variety of themes, or create your own. To do so, pass the `theme=` kwarg to the `Blocks` constructor. For example:\n\n```python\nwith gr.Blocks(theme=gr.themes.Glass()):\n ...\n```\n\nGradio comes with a set of prebuilt themes which you can load from `gr.themes.*`. You can extend these themes or create your own themes from scratch - see the [Theming guide](/guides/theming-guide) for more details.\n\nFor additional styling ability, you can pass any CSS to your app using the `css=` kwarg.\n\nThe base class for the Gradio app is `gradio-container`, so here's an example that changes the background color of the Gradio app:\n```python\nwith gr.Blocks(css=\".gradio-container {background-color: red}\") as demo:\n ...\n```\n\nIf you'd like to reference external files in your css, preface the file path (which can be a relative or absolute path) with `\"file=\"`, for example:\n\n```python\nwith gr.Blocks(css=\".gradio-container {background: url('file=clouds.jpg')}\") as demo:\n ...\n```\n\nYou can also pass the filepath to a CSS file to the `css` argument. \n\n## The `elem_id` and `elem_classes` Arguments\n\nYou can `elem_id` to add an HTML element `id` to any component, and `elem_classes` to add a class or list of classes. This will allow you to select elements more easily with CSS. This approach is also more likely to be stable across Gradio versions as built-in class names or ids may change (however, as mentioned in the warning above, we cannot guarantee complete compatibility between Gradio versions if you use custom CSS as the DOM elements may themselves change).\n\n```python\ncss = \"\"\"\n#warning {background-color: #FFCCCB} \n.feedback textarea {font-size: 24px !important}\n\"\"\"\n\nwith gr.Blocks(css=css) as demo:\n box1 = gr.Textbox(value=\"Good Job\", elem_classes=\"feedback\")\n box2 = gr.Textbox(value=\"Failure\", elem_id=\"warning\", elem_classes=\"feedback\")\n```\n\nThe CSS `#warning` ruleset will only target the second Textbox, while the `.feedback` ruleset will target both. Note that when targeting classes, you might need to put the `!important` selector to override the default Gradio styles.\n\n## Custom JS\n\nEvent listeners have a `_js` argument that can take a Javascript function as a string and treat it just like a Python event listener function. You can pass both a Javascript function and a Python function (in which case the Javascript function is run first) or only Javascript (and set the Python `fn` to `None`). 
Take a look at the code below:\n\n```python\nimport gradio as gr\n\nblocks = gr.Blocks()\n\nwith blocks as demo:\n subject = gr.Textbox(placeholder=\"subject\")\n verb = gr.Radio([\"ate\", \"loved\", \"hated\"])\n object = gr.Textbox(placeholder=\"object\")\n\n with gr.Row():\n btn = gr.Button(\"Create sentence.\")\n reverse_btn = gr.Button(\"Reverse sentence.\")\n foo_bar_btn = gr.Button(\"Append foo\")\n reverse_then_to_the_server_btn = gr.Button(\n \"Reverse sentence and send to server.\"\n )\n\n def sentence_maker(w1, w2, w3):\n return f\"{w1} {w2} {w3}\"\n\n output1 = gr.Textbox(label=\"output 1\")\n output2 = gr.Textbox(label=\"verb\")\n output3 = gr.Textbox(label=\"verb reversed\")\n output4 = gr.Textbox(label=\"front end process and then send to backend\")\n\n btn.click(sentence_maker, [subject, verb, object], output1)\n reverse_btn.click(\n None, [subject, verb, object], output2, _js=\"(s, v, o) => o + ' ' + v + ' ' + s\"\n )\n verb.change(lambda x: x, verb, output3, _js=\"(x) => [...x].reverse().join('')\")\n foo_bar_btn.click(None, [], subject, _js=\"(x) => x + ' foo'\")\n\n reverse_then_to_the_server_btn.click(\n sentence_maker,\n [subject, verb, object],\n output4,\n _js=\"(s, v, o) => [s, v, o].map(x => [...x].reverse().join(''))\",\n )\n\ndemo.launch()\n\n```\n", "html": "

Custom JS and CSS

\n\n

This guide covers how to style Blocks with more flexibility, as well as adding Javascript code to event listeners.

\n\n

Warning: The use of query selectors in custom JS and CSS is not guaranteed to work across Gradio versions as the Gradio HTML DOM may change. We recommend using query selectors sparingly.

\n\n

Custom CSS

\n\n

Gradio themes are the easiest way to customize the look and feel of your app. You can choose from a variety of themes, or create your own. To do so, pass the theme= kwarg to the Blocks constructor. For example:

\n\n
with gr.Blocks(theme=gr.themes.Glass()):\n    ...\n
\n\n

Gradio comes with a set of prebuilt themes which you can load from gr.themes.*. You can extend these themes or create your own themes from scratch - see the Theming guide for more details.

\n\n

For additional styling ability, you can pass any CSS to your app using the css= kwarg.

\n\n

The base class for the Gradio app is gradio-container, so here's an example that changes the background color of the Gradio app:

\n\n
with gr.Blocks(css=\".gradio-container {background-color: red}\") as demo:\n    ...\n
\n\n

If you'd like to reference external files in your css, preface the file path (which can be a relative or absolute path) with \"file=\", for example:

\n\n
with gr.Blocks(css=\".gradio-container {background: url('file=clouds.jpg')}\") as demo:\n    ...\n
\n\n

You can also pass the filepath of a CSS file to the css argument.

\n\n
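
For example, assuming a stylesheet named styles.css sits next to your app file (the filename is just a placeholder):

\n\n
with gr.Blocks(css=\"styles.css\") as demo:\n    ...\n
\n\n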

The elem_id and elem_classes Arguments

\n\n

You can use elem_id to add an HTML element id to any component, and elem_classes to add a class or list of classes. This will allow you to select elements more easily with CSS. This approach is also more likely to be stable across Gradio versions as built-in class names or ids may change (however, as mentioned in the warning above, we cannot guarantee complete compatibility between Gradio versions if you use custom CSS as the DOM elements may themselves change).

\n\n
css = \"\"\"\n#warning {background-color: #FFCCCB} \n.feedback textarea {font-size: 24px !important}\n\"\"\"\n\nwith gr.Blocks(css=css) as demo:\n    box1 = gr.Textbox(value=\"Good Job\", elem_classes=\"feedback\")\n    box2 = gr.Textbox(value=\"Failure\", elem_id=\"warning\", elem_classes=\"feedback\")\n
\n\n

The CSS #warning ruleset will only target the second Textbox, while the .feedback ruleset will target both. Note that when targeting classes, you might need to add the !important rule to override the default Gradio styles.

\n\n

Custom JS

\n\n

Event listeners have a _js argument that can take a Javascript function as a string and treat it just like a Python event listener function. You can pass both a Javascript function and a Python function (in which case the Javascript function is run first) or only Javascript (and set the Python fn to None). Take a look at the code below:

\n\n
import gradio as gr\n\nblocks = gr.Blocks()\n\nwith blocks as demo:\n    subject = gr.Textbox(placeholder=\"subject\")\n    verb = gr.Radio([\"ate\", \"loved\", \"hated\"])\n    object = gr.Textbox(placeholder=\"object\")\n\n    with gr.Row():\n        btn = gr.Button(\"Create sentence.\")\n        reverse_btn = gr.Button(\"Reverse sentence.\")\n        foo_bar_btn = gr.Button(\"Append foo\")\n        reverse_then_to_the_server_btn = gr.Button(\n            \"Reverse sentence and send to server.\"\n        )\n\n    def sentence_maker(w1, w2, w3):\n        return f\"{w1} {w2} {w3}\"\n\n    output1 = gr.Textbox(label=\"output 1\")\n    output2 = gr.Textbox(label=\"verb\")\n    output3 = gr.Textbox(label=\"verb reversed\")\n    output4 = gr.Textbox(label=\"front end process and then send to backend\")\n\n    btn.click(sentence_maker, [subject, verb, object], output1)\n    reverse_btn.click(\n        None, [subject, verb, object], output2, _js=\"(s, v, o) => o + ' ' + v + ' ' + s\"\n    )\n    verb.change(lambda x: x, verb, output3, _js=\"(x) => [...x].reverse().join('')\")\n    foo_bar_btn.click(None, [], subject, _js=\"(x) => x + ' foo'\")\n\n    reverse_then_to_the_server_btn.click(\n        sentence_maker,\n        [subject, verb, object],\n        output4,\n        _js=\"(s, v, o) => [s, v, o].map(x => [...x].reverse().join(''))\",\n    )\n\ndemo.launch()\n\n
\n\n

\n", "tags": [], "spaces": [], "url": "/guides/custom-CSS-and-JS/", "contributor": null}, {"name": "using-blocks-like-functions", "category": "building-with-blocks", "pretty_category": "Building With Blocks", "guide_index": 5, "absolute_index": 12, "pretty_name": "Using Blocks Like Functions", "content": "# Using Gradio Blocks Like Functions\n\n\n\n**Prerequisite**: This Guide builds on the Blocks Introduction. Make sure to [read that guide first](https://gradio.app/guides/quickstart/#blocks-more-flexibility-and-control).\n\n## Introduction\n\nDid you know that apart from being a full-stack machine learning demo, a Gradio Blocks app is also a regular-old python function!?\n\nThis means that if you have a gradio Blocks (or Interface) app called `demo`, you can use `demo` like you would any python function.\n\nSo doing something like `output = demo(\"Hello\", \"friend\")` will run the first event defined in `demo` on the inputs \"Hello\" and \"friend\" and store it\nin the variable `output`.\n\nIf I put you to sleep \ud83e\udd71, please bear with me! By using apps like functions, you can seamlessly compose Gradio apps.\nThe following section will show how.\n\n## Treating Blocks like functions\n\nLet's say we have the following demo that translates english text to german text. \n\n```python\nimport gradio as gr\n\nfrom transformers import pipeline\n\npipe = pipeline(\"translation\", model=\"t5-base\")\n\n\ndef translate(text):\n return pipe(text)[0][\"translation_text\"]\n\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n english = gr.Textbox(label=\"English text\")\n translate_btn = gr.Button(value=\"Translate\")\n with gr.Column():\n german = gr.Textbox(label=\"German Text\")\n\n translate_btn.click(translate, inputs=english, outputs=german, api_name=\"translate-to-german\")\n examples = gr.Examples(examples=[\"I went to the supermarket yesterday.\", \"Helen is a good swimmer.\"],\n inputs=[english])\n\ndemo.launch()\n```\n\nI already went ahead and hosted it in Hugging Face spaces at [gradio/english_translator](https://huggingface.co/spaces/gradio/english_translator).\n\nYou can see the demo below as well:\n\n\n\nNow, let's say you have an app that generates english text, but you wanted to additionally generate german text.\n\nYou could either:\n\n1. Copy the source code of my english-to-german translation and paste it in your app.\n\n2. 
Load my english-to-german translation in your app and treat it like a normal python function.\n\nOption 1 technically always works, but it often introduces unwanted complexity.\n\nOption 2 lets you borrow the functionality you want without tightly coupling our apps.\n\nAll you have to do is call the `Blocks.load` class method in your source file.\nAfter that, you can use my translation app like a regular python function!\n\nThe following code snippet and demo shows how to use `Blocks.load`.\n\nNote that the variable `english_translator` is my english to german app, but its used in `generate_text` like a regular function.\n\n```python\nimport gradio as gr\n\nfrom transformers import pipeline\n\nenglish_translator = gr.Blocks.load(name=\"spaces/gradio/english_translator\")\nenglish_generator = pipeline(\"text-generation\", model=\"distilgpt2\")\n\n\ndef generate_text(text):\n english_text = english_generator(text)[0][\"generated_text\"]\n german_text = english_translator(english_text)\n return english_text, german_text\n\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n seed = gr.Text(label=\"Input Phrase\")\n with gr.Column():\n english = gr.Text(label=\"Generated English Text\")\n german = gr.Text(label=\"Generated German Text\")\n btn = gr.Button(\"Generate\")\n btn.click(generate_text, inputs=[seed], outputs=[english, german])\n gr.Examples([\"My name is Clara and I am\"], inputs=[seed])\n\ndemo.launch()\n```\n\n\n\n## How to control which function in the app to use\n\nIf the app you are loading defines more than one function, you can specify which function to use\nwith the `fn_index` and `api_name` parameters.\n\nIn the code for our english to german demo, you'll see the following line:\n\n```python\ntranslate_btn.click(translate, inputs=english, outputs=german, api_name=\"translate-to-german\")\n```\n\nThe `api_name` gives this function a unique name in our app. You can use this name to tell gradio which\nfunction in the upstream space you want to use:\n\n```python\nenglish_generator(text, api_name=\"translate-to-german\")[0][\"generated_text\"]\n```\n\nYou can also use the `fn_index` parameter.\nImagine my app also defined an english to spanish translation function.\nIn order to use it in our text generation app, we would use the following code:\n\n```python\nenglish_generator(text, fn_index=1)[0][\"generated_text\"]\n```\n\nFunctions in gradio spaces are zero-indexed, so since the spanish translator would be the second function in my space,\nyou would use index 1. \n\n## Parting Remarks\n\nWe showed how treating a Blocks app like a regular python helps you compose functionality across different apps.\nAny Blocks app can be treated like a function, but a powerful pattern is to `load` an app hosted on \n[Hugging Face Spaces](https://huggingface.co/spaces) prior to treating it like a function in your own app.\nYou can also load models hosted on the [Hugging Face Model Hub](https://huggingface.co/models) - see the [Using Hugging Face Integrations](/using_hugging_face_integrations) guide for an example.\n\n### Happy building! \u2692\ufe0f\n", "html": "

Using Gradio Blocks Like Functions

\n\n

Prerequisite: This Guide builds on the Blocks Introduction. Make sure to read that guide first.

\n\n

Introduction

\n\n

Did you know that apart from being a full-stack machine learning demo, a Gradio Blocks app is also a regular-old python function!?

\n\n

This means that if you have a gradio Blocks (or Interface) app called demo, you can use demo like you would any python function.

\n\n

So doing something like output = demo(\"Hello\", \"friend\") will run the first event defined in demo on the inputs \"Hello\" and \"friend\" and store it\nin the variable output.

\n\n

If I put you to sleep \ud83e\udd71, please bear with me! By using apps like functions, you can seamlessly compose Gradio apps.\nThe following section will show how.

\n\n

Treating Blocks like functions

\n\n

Let's say we have the following demo that translates english text to german text.

\n\n
import gradio as gr\n\nfrom transformers import pipeline\n\npipe = pipeline(\"translation\", model=\"t5-base\")\n\n\ndef translate(text):\n    return pipe(text)[0][\"translation_text\"]\n\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        with gr.Column():\n            english = gr.Textbox(label=\"English text\")\n            translate_btn = gr.Button(value=\"Translate\")\n        with gr.Column():\n            german = gr.Textbox(label=\"German Text\")\n\n    translate_btn.click(translate, inputs=english, outputs=german, api_name=\"translate-to-german\")\n    examples = gr.Examples(examples=[\"I went to the supermarket yesterday.\", \"Helen is a good swimmer.\"],\n                           inputs=[english])\n\ndemo.launch()\n
\n\n

I already went ahead and hosted it in Hugging Face spaces at gradio/english_translator.

\n\n

You can see the demo below as well:

\n\n

\n\n

Now, let's say you have an app that generates english text, but you wanted to additionally generate german text.

\n\n

You could either:

\n\n
    \n
  1. Copy the source code of my english-to-german translation and paste it in your app.

  2. \n
  3. Load my english-to-german translation in your app and treat it like a normal python function.

  4. \n
\n\n

Option 1 technically always works, but it often introduces unwanted complexity.

\n\n

Option 2 lets you borrow the functionality you want without tightly coupling our apps.

\n\n

All you have to do is call the Blocks.load class method in your source file.\nAfter that, you can use my translation app like a regular python function!

\n\n

The following code snippet and demo show how to use Blocks.load.

\n\n

Note that the variable english_translator is my english-to-german app, but it's used in generate_text like a regular function.

\n\n
import gradio as gr\n\nfrom transformers import pipeline\n\nenglish_translator = gr.Blocks.load(name=\"spaces/gradio/english_translator\")\nenglish_generator = pipeline(\"text-generation\", model=\"distilgpt2\")\n\n\ndef generate_text(text):\n    english_text = english_generator(text)[0][\"generated_text\"]\n    german_text = english_translator(english_text)\n    return english_text, german_text\n\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        with gr.Column():\n            seed = gr.Text(label=\"Input Phrase\")\n        with gr.Column():\n            english = gr.Text(label=\"Generated English Text\")\n            german = gr.Text(label=\"Generated German Text\")\n    btn = gr.Button(\"Generate\")\n    btn.click(generate_text, inputs=[seed], outputs=[english, german])\n    gr.Examples([\"My name is Clara and I am\"], inputs=[seed])\n\ndemo.launch()\n
\n\n

\n\n

How to control which function in the app to use

\n\n

If the app you are loading defines more than one function, you can specify which function to use\nwith the fn_index and api_name parameters.

\n\n

In the code for our english to german demo, you'll see the following line:

\n\n
translate_btn.click(translate, inputs=english, outputs=german, api_name=\"translate-to-german\")\n
\n\n

The api_name gives this function a unique name in our app. You can use this name to tell gradio which\nfunction in the upstream space you want to use:

\n\n
english_generator(text, api_name=\"translate-to-german\")[0][\"generated_text\"]\n
\n\n

You can also use the fn_index parameter.\nImagine my app also defined an english to spanish translation function.\nIn order to use it in our text generation app, we would use the following code:

\n\n
english_generator(text, fn_index=1)[0][\"generated_text\"]\n
\n\n

Functions in gradio spaces are zero-indexed, so since the spanish translator would be the second function in my space,\nyou would use index 1.

\n\n

Parting Remarks

\n\n

We showed how treating a Blocks app like a regular python function helps you compose functionality across different apps.\nAny Blocks app can be treated like a function, but a powerful pattern is to load an app hosted on \nHugging Face Spaces prior to treating it like a function in your own app.\nYou can also load models hosted on the Hugging Face Model Hub - see the Using Hugging Face Integrations guide for an example.

\n\n

Happy building! \u2692\ufe0f

\n", "tags": ["TRANSLATION", "HUB", "SPACES"], "spaces": [], "url": "/guides/using-blocks-like-functions/", "contributor": null}]}, {"category": "Chatbots", "guides": [{"name": "creating-a-chatbot-fast", "category": "chatbots", "pretty_category": "Chatbots", "guide_index": 1, "absolute_index": 13, "pretty_name": "Creating A Chatbot Fast", "content": "# How to Create a Chatbot with Gradio\n\n\n\n## Introduction\n\nChatbots are a popular application of large language models. Using `gradio`, you can easily build a demo of your chatbot model and share that with your users, or try it yourself using an intuitive chatbot UI.\n\nThis tutorial uses `gr.ChatInterface()`, which is a high-level abstraction that allows you to create your chatbot UI fast, often with a single line of code. The chatbot interface that we create will look something like this:\n\n\n\nWe'll start with a couple of simple examples, and then show how to use `gr.ChatInterface()` with real language models from several popular APIs and libraries, including `langchain`, `openai`, and Hugging Face. \n\n**Prerequisites**: please make sure you are using the **latest version** version of Gradio: \n\n```bash\n$ pip install --upgrade gradio\n```\n\n## Defining a chat function\n\nWhen working with `gr.ChatInterface()`, the first thing you should do is define your chat function. Your chat function should take two arguments: `message` and then `history` (the arguments can be named anything, but must be in this order).\n\n* `message`: a `str` representing the user's input.\n* `history`: a `list` of `list` representing the conversations up until that point. Each inner list consists of two `str` representing a pair: `[user input, bot response]`. \n\nYour function should return a single string response, which is the bot's response to the particular user input `message`. Your function can take into account the `history` of messages, as well as the current message.\n\nLet's take a look at a few examples.\n\n## Example: a chatbot that responds yes or no\n\nLet's write a chat function that responds `Yes` or `No` randomly.\n\nHere's our chat function:\n\n```python\nimport random\n\ndef random_response(message, history):\n return random.choice([\"Yes\", \"No\"])\n```\n\nNow, we can plug this into `gr.ChatInterface()` and call the `.launch()` method to create the web interface:\n\n```python\nimport gradio as gr\n\ngr.ChatInterface(random_response).launch()\n```\n\nThat's it! Here's our running demo, try it out:\n\n\n\n## Another example using the user's input and history\n\nOf course, the previous example was very simplistic, it didn't even take user input or the previous history into account! Here's another simple example showing how to incorporate a user's input as well as the history.\n\n```python\nimport random\nimport gradio as gr\n\ndef alternatingly_agree(message, history):\n if len(history) % 2 == 0:\n return f\"Yes, I do think that '{message}'\"\n else:\n return \"I don't think so\"\n\ngr.ChatInterface(alternatingly_agree).launch()\n```\n\n## Streaming chatbots \n\nIf in your chat function, you use `yield` to generate a sequence of responses, you'll end up with a streaming chatbot. It's that simple!\n\n```python\nimport time\nimport gradio as gr\n\ndef slow_echo(message, history):\n for i in range(len(message)):\n time.sleep(0.3)\n yield \"You typed: \" + message[: i+1]\n\ngr.ChatInterface(slow_echo).queue().launch()\n```\n\nNotice that we've [enabled queuing](/guides/key-features#queuing), which is required to use generator functions. 
While the response is streaming, the \"Submit\" button turns into a \"Stop\" button that can be used to stop the generator function. You can customize the appearance and behavior of the \"Stop\" button using the `stop_btn` parameter.\n\n## Customizing your chatbot\n\nIf you're familiar with Gradio's `Interface` class, the `gr.ChatInterface` includes many of the same arguments that you can use to customize the look and feel of your Chatbot. For example, you can:\n\n* add a title and description above your chatbot using `title` and `description` arguments.\n* add a theme or custom css using `theme` and `css` arguments respectively.\n* add `examples` and even enable `cache_examples`, which make it easier for users to try it out .\n* You can change the text or disable each of the buttons that appear in the chatbot interface: `submit_btn`, `retry_btn`, `undo_btn`, `clear_btn`.\n\nIf you want to customize the `gr.Chatbot` or `gr.Textbox` that compose the `ChatInterface`, then you can pass in your own chatbot or textbox as well. Here's an example of how we can use these parameters:\n\n\n```python\nimport gradio as gr\n\ndef yes_man(message, history):\n if message.endswith(\"?\"):\n return \"Yes\"\n else:\n return \"Ask me anything!\"\n\ngr.ChatInterface(\n yes_man,\n chatbot=gr.Chatbot(height=300),\n textbox=gr.Textbox(placeholder=\"Ask me a yes or no question\", container=False, scale=7),\n title=\"Yes Man\",\n description=\"Ask Yes Man any question\",\n theme=\"soft\",\n examples=[\"Hello\", \"Am I cool?\", \"Are tomatoes vegetables?\"],\n cache_examples=True,\n retry_btn=None,\n undo_btn=\"Delete Previous\",\n clear_btn=\"Clear\",\n).launch()\n```\n\n## Additional Inputs\n\nYou may want to add additional parameters to your chatbot and expose them to your users through the Chatbot UI. For example, suppose you want to add a textbox for a system prompt, or a slider that sets the number of tokens in the chatbot's response. The `ChatInterface` class supports an `additional_inputs` parameter which can be used to add additional input components.\n\nThe `additional_inputs` parameters accepts a component or a list of components. You can pass the component instances directly, or use their string shortcuts (e.g. `\"textbox\"` instead of `gr.Textbox()`). If you pass in component instances, and they have *not* already been rendered, then the components will appear underneath the chatbot (and any examples) within a `gr.Accordion()`. You can set the label of this accordion using the `additional_inputs_accordion_name` parameter. \n\nHere's a complete example:\n\n```python\nimport gradio as gr\nimport time\n\ndef echo(message, history, system_prompt, tokens):\n response = f\"System prompt: {system_prompt}\\n Message: {message}.\"\n for i in range(min(len(response), int(tokens))):\n time.sleep(0.05)\n yield response[: i+1]\n\ndemo = gr.ChatInterface(echo, \n additional_inputs=[\n gr.Textbox(\"You are helpful AI.\", label=\"System Prompt\"), \n gr.Slider(10, 100)\n ]\n )\n\nif __name__ == \"__main__\":\n demo.queue().launch()\n```\n\nIf the components you pass into the `additional_inputs` have already been rendered in a parent `gr.Blocks()`, then they will *not* be re-rendered in the accordion. This provides flexibility in deciding where to lay out the input components. 
In the example below, we position the `gr.Textbox()` on top of the Chatbot UI, while keeping the slider underneath.\n\n```python\nimport gradio as gr\nimport time\n\ndef echo(message, history, system_prompt, tokens):\n response = f\"System prompt: {system_prompt}\\n Message: {message}.\"\n for i in range(min(len(response), int(tokens))):\n time.sleep(0.05)\n yield response[: i+1]\n\nwith gr.Blocks() as demo:\n system_prompt = gr.Textbox(\"You are helpful AI.\", label=\"System Prompt\")\n slider = gr.Slider(10, 100, render=False)\n \n gr.ChatInterface(\n echo, additional_inputs=[system_prompt, slider]\n )\n\ndemo.queue().launch()\n```\n\nIf you need to create something even more custom, then its best to construct the chatbot UI using the low-level `gr.Blocks()` API. We have [a dedicated guide for that here](/guides/creating-a-custom-chatbot-with-blocks).\n\n## Using your chatbot via an API\n\nOnce you've built your Gradio chatbot and are hosting it on [Hugging Face Spaces](https://hf.space) or somewhere else, then you can query it with a simple API at the `/chat` endpoint. The endpoint just expects the user's message (and potentially additional inputs if you have set any using the `additional_inputs` parameter), and will return the response, internally keeping track of the messages sent so far.\n\n[](https://github.com/gradio-app/gradio/assets/1778297/7b10d6db-6476-4e2e-bebd-ecda802c3b8f)\n\nTo use the endpoint, you should use either the [Gradio Python Client](/guides/getting-started-with-the-python-client) or the [Gradio JS client](/guides/getting-started-with-the-js-client).\n\n## A `langchain` example\n\nNow, let's actually use the `gr.ChatInterface` with some real large language models. We'll start by using `langchain` on top of `openai` to build a general-purpose streaming chatbot application in 19 lines of code. You'll need to have an OpenAI key for this example (keep reading for the free, open-source equivalent!)\n\n```python\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.schema import AIMessage, HumanMessage\nimport openai\nimport gradio as gr\n\nos.envrion[\"OPENAI_API_KEY\"] = \"sk-...\" # Replace with your key\n\nllm = ChatOpenAI(temperature=1.0, model='gpt-3.5-turbo-0613')\n\ndef predict(message, history):\n history_langchain_format = []\n for human, ai in history:\n history_langchain_format.append(HumanMessage(content=human))\n history_langchain_format.append(AIMessage(content=ai))\n history_langchain_format.append(HumanMessage(content=message))\n gpt_response = llm(history_langchain_format)\n return gpt_response.content\n\ngr.ChatInterface(predict).launch() \n```\n\n## A streaming example using `openai`\n\nOf course, we could also use the `openai` library directy. 
Here a similar example, but this time with streaming results as well:\n\n\n```python\nimport openai\nimport gradio as gr\n\nopenai.api_key = \"sk-...\" # Replace with your key\n\ndef predict(message, history):\n history_openai_format = []\n for human, assistant in history:\n history_openai_format.append({\"role\": \"user\", \"content\": human })\n history_openai_format.append({\"role\": \"assistant\", \"content\":assistant})\n history_openai_format.append({\"role\": \"user\", \"content\": message})\n\n response = openai.ChatCompletion.create(\n model='gpt-3.5-turbo',\n messages= history_openai_format, \n temperature=1.0,\n stream=True\n )\n \n partial_message = \"\"\n for chunk in response:\n if len(chunk['choices'][0]['delta']) != 0:\n partial_message = partial_message + chunk['choices'][0]['delta']['content']\n yield partial_message \n\ngr.ChatInterface(predict).queue().launch() \n```\n\n## Example using a local, open-source LLM with Hugging Face\n\nOf course, in many cases you want to run a chatbot locally. Here's the equivalent example using Together's RedePajama model, from Hugging Face (this requires you to have a GPU with CUDA).\n\n```python\nimport gradio as gr\nimport torch\nfrom transformers import AutoModelForCausalLM, AutoTokenizer, StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer\nfrom threading import Thread\n\ntokenizer = AutoTokenizer.from_pretrained(\"togethercomputer/RedPajama-INCITE-Chat-3B-v1\")\nmodel = AutoModelForCausalLM.from_pretrained(\"togethercomputer/RedPajama-INCITE-Chat-3B-v1\", torch_dtype=torch.float16)\nmodel = model.to('cuda:0')\n\nclass StopOnTokens(StoppingCriteria):\n def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:\n stop_ids = [29, 0]\n for stop_id in stop_ids:\n if input_ids[0][-1] == stop_id:\n return True\n return False\n\ndef predict(message, history): \n\n history_transformer_format = history + [[message, \"\"]]\n stop = StopOnTokens()\n\n messages = \"\".join([\"\".join([\"\\n:\"+item[0], \"\\n:\"+item[1]]) #curr_system_message + \n for item in history_transformer_format])\n \n model_inputs = tokenizer([messages], return_tensors=\"pt\").to(\"cuda\")\n streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)\n generate_kwargs = dict(\n model_inputs,\n streamer=streamer,\n max_new_tokens=1024,\n do_sample=True,\n top_p=0.95,\n top_k=1000,\n temperature=1.0,\n num_beams=1,\n stopping_criteria=StoppingCriteriaList([stop])\n )\n t = Thread(target=model.generate, kwargs=generate_kwargs)\n t.start()\n\n partial_message = \"\"\n for new_token in streamer:\n if new_token != '<':\n partial_message += new_token\n yield partial_message \n \n\ngr.ChatInterface(predict).queue().launch()\n```\n\nWith those examples, you should be all set to create your own Gradio Chatbot demos soon! For building even more custom Chatbot applications, check out [a dedicated guide](/guides/creating-a-custom-chatbot-with-blocks) using the low-level `gr.Blocks()` API.", "html": "

How to Create a Chatbot with Gradio

\n\n

Introduction

\n\n

Chatbots are a popular application of large language models. Using gradio, you can easily build a demo of your chatbot model and share that with your users, or try it yourself using an intuitive chatbot UI.

\n\n

This tutorial uses gr.ChatInterface(), which is a high-level abstraction that allows you to create your chatbot UI fast, often with a single line of code. The chatbot interface that we create will look something like this:

\n\n

\n\n

We'll start with a couple of simple examples, and then show how to use gr.ChatInterface() with real language models from several popular APIs and libraries, including langchain, openai, and Hugging Face.

\n\n

Prerequisites: please make sure you are using the latest version of Gradio:

\n\n
$ pip install --upgrade gradio\n
\n\n

Defining a chat function

\n\n

When working with gr.ChatInterface(), the first thing you should do is define your chat function. Your chat function should take two arguments: message and then history (the arguments can be named anything, but must be in this order).

\n\n
    \n
  • message: a str representing the user's input.
  • \n
  • history: a list of list representing the conversations up until that point. Each inner list consists of two str representing a pair: [user input, bot response].
  • \n
\n\n

Your function should return a single string response, which is the bot's response to the particular user input message. Your function can take into account the history of messages, as well as the current message.

\n\n

Let's take a look at a few examples.

\n\n

Example: a chatbot that responds yes or no

\n\n

Let's write a chat function that responds Yes or No randomly.

\n\n

Here's our chat function:

\n\n
import random\n\ndef random_response(message, history):\n    return random.choice([\"Yes\", \"No\"])\n
\n\n

Now, we can plug this into gr.ChatInterface() and call the .launch() method to create the web interface:

\n\n
import gradio as gr\n\ngr.ChatInterface(random_response).launch()\n
\n\n

That's it! Here's our running demo, try it out:

\n\n

\n\n

Another example using the user's input and history

\n\n

Of course, the previous example was very simplistic: it didn't even take user input or the previous history into account! Here's another simple example showing how to incorporate a user's input as well as the history.

\n\n
import random\nimport gradio as gr\n\ndef alternatingly_agree(message, history):\n    if len(history) % 2 == 0:\n        return f\"Yes, I do think that '{message}'\"\n    else:\n        return \"I don't think so\"\n\ngr.ChatInterface(alternatingly_agree).launch()\n
\n\n

Streaming chatbots

\n\n

If in your chat function, you use yield to generate a sequence of responses, you'll end up with a streaming chatbot. It's that simple!

\n\n
import time\nimport gradio as gr\n\ndef slow_echo(message, history):\n    for i in range(len(message)):\n        time.sleep(0.3)\n        yield \"You typed: \" + message[: i+1]\n\ngr.ChatInterface(slow_echo).queue().launch()\n
\n\n

Notice that we've enabled queuing, which is required to use generator functions. While the response is streaming, the \"Submit\" button turns into a \"Stop\" button that can be used to stop the generator function. You can customize the appearance and behavior of the \"Stop\" button using the stop_btn parameter.

\n\n
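
For instance, reusing the slow_echo function above, a minimal sketch that relabels the stop button (or hides it entirely by passing None) might look like this:

\n\n
# relabel the stop button; stop_btn=None would hide it instead\ngr.ChatInterface(slow_echo, stop_btn=\"Stop generating\").queue().launch()\n
\n\n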

Customizing your chatbot

\n\n

If you're familiar with Gradio's Interface class, the gr.ChatInterface includes many of the same arguments that you can use to customize the look and feel of your Chatbot. For example, you can:

\n\n
    \n
  • add a title and description above your chatbot using title and description arguments.
  • \n
  • add a theme or custom css using theme and css arguments respectively.
  • \n
  • add examples and even enable cache_examples, which make it easier for users to try it out.
  • \n
  • You can change the text or disable each of the buttons that appear in the chatbot interface: submit_btn, retry_btn, undo_btn, clear_btn.
  • \n
\n\n

If you want to customize the gr.Chatbot or gr.Textbox that compose the ChatInterface, then you can pass in your own chatbot or textbox as well. Here's an example of how we can use these parameters:

\n\n
import gradio as gr\n\ndef yes_man(message, history):\n    if message.endswith(\"?\"):\n        return \"Yes\"\n    else:\n        return \"Ask me anything!\"\n\ngr.ChatInterface(\n    yes_man,\n    chatbot=gr.Chatbot(height=300),\n    textbox=gr.Textbox(placeholder=\"Ask me a yes or no question\", container=False, scale=7),\n    title=\"Yes Man\",\n    description=\"Ask Yes Man any question\",\n    theme=\"soft\",\n    examples=[\"Hello\", \"Am I cool?\", \"Are tomatoes vegetables?\"],\n    cache_examples=True,\n    retry_btn=None,\n    undo_btn=\"Delete Previous\",\n    clear_btn=\"Clear\",\n).launch()\n
\n\n

Additional Inputs

\n\n

You may want to add additional parameters to your chatbot and expose them to your users through the Chatbot UI. For example, suppose you want to add a textbox for a system prompt, or a slider that sets the number of tokens in the chatbot's response. The ChatInterface class supports an additional_inputs parameter which can be used to add additional input components.

\n\n

The additional_inputs parameter accepts a component or a list of components. You can pass the component instances directly, or use their string shortcuts (e.g. \"textbox\" instead of gr.Textbox()). If you pass in component instances, and they have not already been rendered, then the components will appear underneath the chatbot (and any examples) within a gr.Accordion(). You can set the label of this accordion using the additional_inputs_accordion_name parameter.

\n\n

Here's a complete example:

\n\n
import gradio as gr\nimport time\n\ndef echo(message, history, system_prompt, tokens):\n    response = f\"System prompt: {system_prompt}\\n Message: {message}.\"\n    for i in range(min(len(response), int(tokens))):\n        time.sleep(0.05)\n        yield response[: i+1]\n\ndemo = gr.ChatInterface(echo, \n                        additional_inputs=[\n                            gr.Textbox(\"You are helpful AI.\", label=\"System Prompt\"), \n                            gr.Slider(10, 100)\n                        ]\n                       )\n\nif __name__ == \"__main__\":\n    demo.queue().launch()\n
\n\n

If the components you pass into the additional_inputs have already been rendered in a parent gr.Blocks(), then they will not be re-rendered in the accordion. This provides flexibility in deciding where to lay out the input components. In the example below, we position the gr.Textbox() on top of the Chatbot UI, while keeping the slider underneath.

\n\n
import gradio as gr\nimport time\n\ndef echo(message, history, system_prompt, tokens):\n    response = f\"System prompt: {system_prompt}\\n Message: {message}.\"\n    for i in range(min(len(response), int(tokens))):\n        time.sleep(0.05)\n        yield response[: i+1]\n\nwith gr.Blocks() as demo:\n    system_prompt = gr.Textbox(\"You are helpful AI.\", label=\"System Prompt\")\n    slider = gr.Slider(10, 100, render=False)\n\n    gr.ChatInterface(\n        echo, additional_inputs=[system_prompt, slider]\n    )\n\ndemo.queue().launch()\n
\n\n

If you need to create something even more custom, then it's best to construct the chatbot UI using the low-level gr.Blocks() API. We have a dedicated guide for that here.

\n\n

Using your chatbot via an API

\n\n

Once you've built your Gradio chatbot and are hosting it on Hugging Face Spaces or somewhere else, then you can query it with a simple API at the /chat endpoint. The endpoint just expects the user's message (and potentially additional inputs if you have set any using the additional_inputs parameter), and will return the response, internally keeping track of the messages sent so far.

\n\n

\n\n

To use the endpoint, you should use either the Gradio Python Client or the Gradio JS client.

\n\n
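
For example, a minimal sketch using the Gradio Python Client might look like this; the Space name below is a placeholder for wherever your chatbot is hosted:

\n\n
from gradio_client import Client\n\n# \"your-username/your-chat-space\" is a placeholder for your own hosted chatbot\nclient = Client(\"your-username/your-chat-space\")\n\n# the /chat endpoint takes the user's message and returns the bot's response\nresult = client.predict(\"Hello, how are you?\", api_name=\"/chat\")\nprint(result)\n
\n\n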

A langchain example

\n\n

Now, let's actually use the gr.ChatInterface with some real large language models. We'll start by using langchain on top of openai to build a general-purpose chatbot application in just a few lines of code. You'll need to have an OpenAI key for this example (keep reading for the free, open-source equivalent!)

\n\n
from langchain.chat_models import ChatOpenAI\nfrom langchain.schema import AIMessage, HumanMessage\nimport os\nimport gradio as gr\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-...\"  # Replace with your key\n\nllm = ChatOpenAI(temperature=1.0, model='gpt-3.5-turbo-0613')\n\ndef predict(message, history):\n    history_langchain_format = []\n    for human, ai in history:\n        history_langchain_format.append(HumanMessage(content=human))\n        history_langchain_format.append(AIMessage(content=ai))\n    history_langchain_format.append(HumanMessage(content=message))\n    gpt_response = llm(history_langchain_format)\n    return gpt_response.content\n\ngr.ChatInterface(predict).launch()\n
\n\n

A streaming example using openai

\n\n

Of course, we could also use the openai library directly. Here's a similar example, but this time with streaming results as well:

\n\n
import openai\nimport gradio as gr\n\nopenai.api_key = \"sk-...\"  # Replace with your key\n\ndef predict(message, history):\n    history_openai_format = []\n    for human, assistant in history:\n        history_openai_format.append({\"role\": \"user\", \"content\": human })\n        history_openai_format.append({\"role\": \"assistant\", \"content\":assistant})\n    history_openai_format.append({\"role\": \"user\", \"content\": message})\n\n    response = openai.ChatCompletion.create(\n        model='gpt-3.5-turbo',\n        messages= history_openai_format,         \n        temperature=1.0,\n        stream=True\n    )\n\n    partial_message = \"\"\n    for chunk in response:\n        if len(chunk['choices'][0]['delta']) != 0:\n            partial_message = partial_message + chunk['choices'][0]['delta']['content']\n            yield partial_message \n\ngr.ChatInterface(predict).queue().launch() \n
\n\n

Example using a local, open-source LLM with Hugging Face

\n\n

Of course, in many cases you want to run a chatbot locally. Here's the equivalent example using Together's RedPajama model from Hugging Face (this requires you to have a GPU with CUDA).

\n\n
import gradio as gr\nimport torch\nfrom transformers import AutoModelForCausalLM, AutoTokenizer, StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer\nfrom threading import Thread\n\ntokenizer = AutoTokenizer.from_pretrained(\"togethercomputer/RedPajama-INCITE-Chat-3B-v1\")\nmodel = AutoModelForCausalLM.from_pretrained(\"togethercomputer/RedPajama-INCITE-Chat-3B-v1\", torch_dtype=torch.float16)\nmodel = model.to('cuda:0')\n\nclass StopOnTokens(StoppingCriteria):\n    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:\n        stop_ids = [29, 0]\n        for stop_id in stop_ids:\n            if input_ids[0][-1] == stop_id:\n                return True\n        return False\n\ndef predict(message, history): \n\n    history_transformer_format = history + [[message, \"\"]]\n    stop = StopOnTokens()\n\n    messages = \"\".join([\"\".join([\"\\n:\"+item[0], \"\\n:\"+item[1]])  #curr_system_message + \n                for item in history_transformer_format])\n\n    model_inputs = tokenizer([messages], return_tensors=\"pt\").to(\"cuda\")\n    streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)\n    generate_kwargs = dict(\n        model_inputs,\n        streamer=streamer,\n        max_new_tokens=1024,\n        do_sample=True,\n        top_p=0.95,\n        top_k=1000,\n        temperature=1.0,\n        num_beams=1,\n        stopping_criteria=StoppingCriteriaList([stop])\n        )\n    t = Thread(target=model.generate, kwargs=generate_kwargs)\n    t.start()\n\n    partial_message  = \"\"\n    for new_token in streamer:\n        if new_token != '<':\n            partial_message += new_token\n            yield partial_message \n\n\ngr.ChatInterface(predict).queue().launch()\n
\n\n

With those examples, you should be all set to create your own Gradio Chatbot demos soon! For building even more custom Chatbot applications, check out a dedicated guide using the low-level gr.Blocks() API.

\n", "tags": ["NLP", "TEXT", "CHAT"], "spaces": [], "url": "/guides/creating-a-chatbot-fast/", "contributor": null}, {"name": "creating-a-custom-chatbot-with-blocks", "category": "chatbots", "pretty_category": "Chatbots", "guide_index": 2, "absolute_index": 14, "pretty_name": "Creating A Custom Chatbot With Blocks", "content": "# How to Create a Custom Chatbot with Gradio Blocks\n\n\n\n\n## Introduction\n\n**Important Note**: if you are getting started, we recommend using the `gr.ChatInterface` to create chatbots -- its a high-level abstraction that makes it possible to create beautiful chatbot applications fast, often with a single line of code. [Read more about it here](/guides/creating-a-chatbot-fast).\n\nThis tutorial will show how to make chatbot UIs from scratch with Gradio's low-level Blocks API. This will give you full control over your Chatbot UI. You'll start by first creating a a simple chatbot to display text, a second one to stream text responses, and finally a chatbot that can handle media files as well. The chatbot interface that we create will look something like this:\n\n\n\n**Prerequisite**: We'll be using the `gradio.Blocks` class to build our Chatbot demo.\nYou can [read the Guide to Blocks first](https://gradio.app/quickstart/#blocks-more-flexibility-and-control) if you are not already familiar with it. Also please make sure you are using the **latest version** version of Gradio: `pip install --upgrade gradio`. \n\n## A Simple Chatbot Demo\n\nLet's start with recreating the simple demo above. As you may have noticed, our bot simply randomly responds \"How are you?\", \"I love you\", or \"I'm very hungry\" to any input. Here's the code to create this with Gradio:\n\n```python\nimport gradio as gr\nimport random\nimport time\n\nwith gr.Blocks() as demo:\n chatbot = gr.Chatbot()\n msg = gr.Textbox()\n clear = gr.ClearButton([msg, chatbot])\n\n def respond(message, chat_history):\n bot_message = random.choice([\"How are you?\", \"I love you\", \"I'm very hungry\"])\n chat_history.append((message, bot_message))\n time.sleep(2)\n return \"\", chat_history\n\n msg.submit(respond, [msg, chatbot], [msg, chatbot])\n\ndemo.launch()\n\n```\n\nThere are three Gradio components here:\n\n* A `Chatbot`, whose value stores the entire history of the conversation, as a list of response pairs between the user and bot.\n* A `Textbox` where the user can type their message, and then hit enter/submit to trigger the chatbot response\n* A `ClearButton` button to clear the Textbox and entire Chatbot history\n\nWe have a single function, `respond()`, which takes in the entire history of the chatbot, appends a random message, waits 1 second, and then returns the updated chat history. The `respond()` function also clears the textbox when it returns. \n\nOf course, in practice, you would replace `respond()` with your own more complex function, which might call a pretrained model or an API, to generate a response.\n\n\n\n\n## Add Streaming to your Chatbot\n\nThere are several ways we can improve the user experience of the chatbot above. First, we can stream responses so the user doesn't have to wait as long for a message to be generated. Second, we can have the user message appear immediately in the chat history, while the chatbot's response is being generated. 
Here's the code to achieve that: \n\n```python\nimport gradio as gr\nimport random\nimport time\n\nwith gr.Blocks() as demo:\n chatbot = gr.Chatbot()\n msg = gr.Textbox()\n clear = gr.Button(\"Clear\")\n\n def user(user_message, history):\n return \"\", history + [[user_message, None]]\n\n def bot(history):\n bot_message = random.choice([\"How are you?\", \"I love you\", \"I'm very hungry\"])\n history[-1][1] = \"\"\n for character in bot_message:\n history[-1][1] += character\n time.sleep(0.05)\n yield history\n\n msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(\n bot, chatbot, chatbot\n )\n clear.click(lambda: None, None, chatbot, queue=False)\n \ndemo.queue()\ndemo.launch()\n\n```\n\n\nYou'll notice that when a user submits their message, we now *chain* three event events with `.then()`:\n\n1. The first method `user()` updates the chatbot with the user message and clears the input field. This method also makes the input field non interactive so that the user can't send another message while the chatbot is responding. Because we want this to happen instantly, we set `queue=False`, which would skip any queue had it been enabled. The chatbot's history is appended with `(user_message, None)`, the `None` signifying that the bot has not responded.\n\n2. The second method, `bot()` updates the chatbot history with the bot's response. Instead of creating a new message, we just replace the previously-created `None` message with the bot's response. Finally, we construct the message character by character and `yield` the intermediate outputs as they are being constructed. Gradio automatically turns any function with the `yield` keyword [into a streaming output interface](/guides/key-features/#iterative-outputs).\n\n3. The third method makes the input field interactive again so that users can send another message to the bot.\n\nOf course, in practice, you would replace `bot()` with your own more complex function, which might call a pretrained model or an API, to generate a response.\n\nFinally, we enable queuing by running `demo.queue()`, which is required for streaming intermediate outputs. You can try the improved chatbot by scrolling to the demo at the top of this page.\n\n## Adding Markdown, Images, Audio, or Videos\n\nThe `gr.Chatbot` component supports a subset of markdown including bold, italics, and code. For example, we could write a function that responds to a user's message, with a bold **That's cool!**, like this:\n\n```py\ndef bot(history):\n response = \"**That's cool!**\"\n history[-1][1] = response\n return history\n```\n\nIn addition, it can handle media files, such as images, audio, and video. To pass in a media file, we must pass in the file as a tuple of two strings, like this: `(filepath, alt_text)`. The `alt_text` is optional, so you can also just pass in a tuple with a single element `(filepath,)`, like this:\n\n```python\ndef add_file(history, file):\n history = history + [((file.name,), None)]\n return history\n```\n\nPutting this together, we can create a *multimodal* chatbot with a textbox for a user to submit text and an file upload button to submit images / audio / video files. The rest of the code looks pretty much the same as before:\n\n```python\nimport gradio as gr\nimport random\nimport time\n\n# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). 
Plus shows support for streaming text.\n\ndef add_text(history, text):\n history = history + [(text, None)]\n return history, gr.update(value=\"\", interactive=False)\n\n\ndef add_file(history, file):\n history = history + [((file.name,), None)]\n return history\n\n\ndef bot(history):\n response = \"**That's cool!**\"\n history[-1][1] = \"\"\n for character in response:\n history[-1][1] += character\n time.sleep(0.05)\n yield history\n\n\nwith gr.Blocks() as demo:\n chatbot = gr.Chatbot([], elem_id=\"chatbot\").style(height=750)\n\n with gr.Row():\n with gr.Column(scale=0.85):\n txt = gr.Textbox(\n show_label=False,\n placeholder=\"Enter text and press enter, or upload an image\",\n ).style(container=False)\n with gr.Column(scale=0.15, min_width=0):\n btn = gr.UploadButton(\"\ud83d\udcc1\", file_types=[\"image\", \"video\", \"audio\"])\n\n txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(\n bot, chatbot, chatbot\n )\n txt_msg.then(lambda: gr.update(interactive=True), None, [txt], queue=False)\n file_msg = btn.upload(add_file, [chatbot, btn], [chatbot], queue=False).then(\n bot, chatbot, chatbot\n )\n\ndemo.queue()\ndemo.launch()\n\n```\n\n\nAnd you're done! That's all the code you need to build an interface for your chatbot model. Finally, we'll end our Guide with some links to Chatbots that are running on Spaces so that you can get an idea of what else is possible:\n\n* [project-baize/Baize-7B](https://huggingface.co/spaces/project-baize/Baize-7B): A stylized chatbot that allows you to stop generation as well as regenerate responses. \n* [MAGAer13/mPLUG-Owl](https://huggingface.co/spaces/MAGAer13/mPLUG-Owl): A multimodal chatbot that allows you to upvote and downvote responses. \n", "html": "

How to Create a Custom Chatbot with Gradio Blocks

\n\n

Introduction

\n\n

Important Note: if you are getting started, we recommend using the gr.ChatInterface to create chatbots -- it's a high-level abstraction that makes it possible to create beautiful chatbot applications fast, often with a single line of code. Read more about it here.

\n\n

This tutorial will show how to make chatbot UIs from scratch with Gradio's low-level Blocks API. This will give you full control over your Chatbot UI. You'll start by first creating a simple chatbot to display text, a second one to stream text responses, and finally a chatbot that can handle media files as well. The chatbot interface that we create will look something like this:

\n\n

\n\n

Prerequisite: We'll be using the gradio.Blocks class to build our Chatbot demo.\nYou can read the Guide to Blocks first if you are not already familiar with it. Also please make sure you are using the latest version of Gradio: pip install --upgrade gradio.

\n\n

A Simple Chatbot Demo

\n\n

Let's start with recreating the simple demo above. As you may have noticed, our bot simply randomly responds \"How are you?\", \"I love you\", or \"I'm very hungry\" to any input. Here's the code to create this with Gradio:

\n\n
import gradio as gr\nimport random\nimport time\n\nwith gr.Blocks() as demo:\n    chatbot = gr.Chatbot()\n    msg = gr.Textbox()\n    clear = gr.ClearButton([msg, chatbot])\n\n    def respond(message, chat_history):\n        bot_message = random.choice([\"How are you?\", \"I love you\", \"I'm very hungry\"])\n        chat_history.append((message, bot_message))\n        time.sleep(2)\n        return \"\", chat_history\n\n    msg.submit(respond, [msg, chatbot], [msg, chatbot])\n\ndemo.launch()\n\n
\n\n

There are three Gradio components here:

\n\n
    \n
  • A Chatbot, whose value stores the entire history of the conversation, as a list of response pairs between the user and bot.
  • \n
  • A Textbox where the user can type their message, and then hit enter/submit to trigger the chatbot response
  • \n
  • A ClearButton button to clear the Textbox and entire Chatbot history
  • \n
\n\n

We have a single function, respond(), which takes in the entire history of the chatbot, appends a random message, waits 2 seconds, and then returns the updated chat history. The respond() function also clears the textbox when it returns.

\n\n

Of course, in practice, you would replace respond() with your own more complex function, which might call a pretrained model or an API, to generate a response.

\n\n
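
As a rough sketch (not part of the original demo), here is what respond() might look like if it called a small text-generation model from the transformers library instead of picking a canned reply; the model choice and generation settings are illustrative assumptions:

\n\n
import gradio as gr\nfrom transformers import pipeline\n\n# illustrative choice of model; any text-generation pipeline would work similarly\ngenerator = pipeline(\"text-generation\", model=\"gpt2\")\n\nwith gr.Blocks() as demo:\n    chatbot = gr.Chatbot()\n    msg = gr.Textbox()\n\n    def respond(message, chat_history):\n        # generate a reply from the model instead of choosing a random canned message\n        bot_message = generator(message, max_new_tokens=50)[0][\"generated_text\"]\n        chat_history.append((message, bot_message))\n        return \"\", chat_history\n\n    msg.submit(respond, [msg, chatbot], [msg, chatbot])\n\ndemo.launch()\n
\n\n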

\n\n

Add Streaming to your Chatbot

\n\n

There are several ways we can improve the user experience of the chatbot above. First, we can stream responses so the user doesn't have to wait as long for a message to be generated. Second, we can have the user message appear immediately in the chat history, while the chatbot's response is being generated. Here's the code to achieve that:

\n\n
import gradio as gr\nimport random\nimport time\n\nwith gr.Blocks() as demo:\n    chatbot = gr.Chatbot()\n    msg = gr.Textbox()\n    clear = gr.Button(\"Clear\")\n\n    def user(user_message, history):\n        return \"\", history + [[user_message, None]]\n\n    def bot(history):\n        bot_message = random.choice([\"How are you?\", \"I love you\", \"I'm very hungry\"])\n        history[-1][1] = \"\"\n        for character in bot_message:\n            history[-1][1] += character\n            time.sleep(0.05)\n            yield history\n\n    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(\n        bot, chatbot, chatbot\n    )\n    clear.click(lambda: None, None, chatbot, queue=False)\n\ndemo.queue()\ndemo.launch()\n\n
\n\n

You'll notice that when a user submits their message, we now chain three events with .then():

\n\n
    \n
  1. The first method user() updates the chatbot with the user message and clears the input field. This method also makes the input field non-interactive so that the user can't send another message while the chatbot is responding. Because we want this to happen instantly, we set queue=False, which would skip any queue had it been enabled. The chatbot's history is appended with (user_message, None), the None signifying that the bot has not responded.

  2. \n
  3. The second method, bot() updates the chatbot history with the bot's response. Instead of creating a new message, we just replace the previously-created None message with the bot's response. Finally, we construct the message character by character and yield the intermediate outputs as they are being constructed. Gradio automatically turns any function with the yield keyword into a streaming output interface.

  4. \n
  5. The third method makes the input field interactive again so that users can send another message to the bot.

  6. \n
\n\n

Of course, in practice, you would replace bot() with your own more complex function, which might call a pretrained model or an API, to generate a response.

\n\n

Finally, we enable queuing by running demo.queue(), which is required for streaming intermediate outputs. You can try the improved chatbot by scrolling to the demo at the top of this page.

\n\n

Adding Markdown, Images, Audio, or Videos

\n\n

The gr.Chatbot component supports a subset of markdown including bold, italics, and code. For example, we could write a function that responds to a user's message, with a bold That's cool!, like this:

\n\n
def bot(history):\n    response = \"**That's cool!**\"\n    history[-1][1] = response\n    return history\n
\n\n

In addition, it can handle media files, such as images, audio, and video. To pass in a media file, we must pass in the file as a tuple of two strings, like this: (filepath, alt_text). The alt_text is optional, so you can also just pass in a tuple with a single element (filepath,), like this:

\n\n
def add_file(history, file):\n    history = history + [((file.name,), None)]\n    return history\n
\n\n

Putting this together, we can create a multimodal chatbot with a textbox for a user to submit text and a file upload button to submit images / audio / video files. The rest of the code looks pretty much the same as before:

\n\n
import gradio as gr\nimport random\nimport time\n\n# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.\n\ndef add_text(history, text):\n    history = history + [(text, None)]\n    return history, gr.update(value=\"\", interactive=False)\n\n\ndef add_file(history, file):\n    history = history + [((file.name,), None)]\n    return history\n\n\ndef bot(history):\n    response = \"**That's cool!**\"\n    history[-1][1] = \"\"\n    for character in response:\n        history[-1][1] += character\n        time.sleep(0.05)\n        yield history\n\n\nwith gr.Blocks() as demo:\n    chatbot = gr.Chatbot([], elem_id=\"chatbot\").style(height=750)\n\n    with gr.Row():\n        with gr.Column(scale=0.85):\n            txt = gr.Textbox(\n                show_label=False,\n                placeholder=\"Enter text and press enter, or upload an image\",\n            ).style(container=False)\n        with gr.Column(scale=0.15, min_width=0):\n            btn = gr.UploadButton(\"\ud83d\udcc1\", file_types=[\"image\", \"video\", \"audio\"])\n\n    txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(\n        bot, chatbot, chatbot\n    )\n    txt_msg.then(lambda: gr.update(interactive=True), None, [txt], queue=False)\n    file_msg = btn.upload(add_file, [chatbot, btn], [chatbot], queue=False).then(\n        bot, chatbot, chatbot\n    )\n\ndemo.queue()\ndemo.launch()\n\n
\n\n

\n\n

And you're done! That's all the code you need to build an interface for your chatbot model. Finally, we'll end our Guide with some links to Chatbots that are running on Spaces so that you can get an idea of what else is possible:

\n\n
    \n
  • project-baize/Baize-7B: A stylized chatbot that allows you to stop generation as well as regenerate responses.
  • \n
  • MAGAer13/mPLUG-Owl: A multimodal chatbot that allows you to upvote and downvote responses.
  • \n
\n", "tags": ["NLP", "TEXT", "CHAT"], "spaces": ["https://huggingface.co/spaces/gradio/chatbot_streaming", "https://huggingface.co/spaces/project-baize/Baize-7B", ""], "url": "/guides/creating-a-custom-chatbot-with-blocks/", "contributor": null}, {"name": "creating-a-discord-bot-from-a-gradio-app", "category": "chatbots", "pretty_category": "Chatbots", "guide_index": 3, "absolute_index": 15, "pretty_name": "Creating A Discord Bot From A Gradio App", "content": "# \ud83d\ude80 Creating Discord Bots from Gradio Apps \ud83d\ude80\n\n\n\nWe're excited to announce that Gradio can now automatically create a discord bot from a deployed app! \ud83e\udd16 \n\nDiscord is a popular communication platform that allows users to chat and interact with each other in real-time. By turning your Gradio app into a Discord bot, you can bring cutting edge AI to your discord server and give your community a whole new way to interact.\n\n## \ud83d\udcbb How does it work? \ud83d\udcbb\n\nWith `gradio_client` version `0.3.0`, any gradio `ChatInterface` app on the internet can automatically be deployed as a discord bot via the `deploy_discord` method of the `Client` class.\n\nTechnically, any gradio app that exposes an api route that takes in a single string and outputs a single string can be deployed to discord. In this guide, we will focus on `gr.ChatInterface` as those apps naturally lend themselves to discord's chat functionality.\n\n## \ud83d\udee0\ufe0f Requirements \ud83d\udee0\ufe0f\n\nMake sure you have the latest `gradio_client` and `gradio` versions installed.\n\n```bash\npip install gradio_client>=0.3.0 gradio>=3.38.0\n```\n\nAlso, make sure you have a [Hugging Face account](https://huggingface.co/) and a [write access token](https://huggingface.co/docs/hub/security-tokens).\n\n\u26a0\ufe0f Tip \u26a0\ufe0f: Make sure you login to the Hugging Face Hub by running `huggingface-cli login`. This will let you skip passing your token in all subsequent commands in this guide.\n\n## \ud83c\udfc3\u200d\u2640\ufe0f Quickstart \ud83c\udfc3\u200d\u2640\ufe0f\n\n### Step 1: Implementing our chatbot\nLet's build a very simple Chatbot using `ChatInterface` that simply repeats the user message. Write the following code into an `app.py`\n\n```python\nimport gradio as gr\n\ndef slow_echo(message, history):\n return message\n\ndemo = gr.ChatInterface(slow_echo).queue().launch()\n```\n\n### Step 2: Deploying our App\nIn order to create a discord bot for our app, it must be accessible over the internet. In this guide, we will use the `gradio deploy` command to deploy our chatbot to Hugging Face spaces from the command line. Run the following command.\n\n```bash\ngradio deploy --title echo-chatbot --app-file app.py\n```\n\nThis command will ask you some questions, e.g. requested hardware, requirements, but the default values will suffice for this guide.\nNote the URL of the space that was created. Mine is https://huggingface.co/spaces/freddyaboulton/echo-chatbot\n\n### Step 3: Creating a Discord Bot\nTurning our space into a discord bot is also a one-liner thanks to the `gradio deploy-discord`. Run the following command:\n\n```bash\ngradio deploy-discord --src freddyaboulton/echo-chatbot\n```\n\n\u2757\ufe0f Advanced \u2757\ufe0f: If you already have a discord bot token you can pass it to the `deploy-discord` command. Don't worry, if you don't have one yet!\n\n```bash\ngradio deploy-discord --src freddyaboulton/echo-chatbot --discord-bot-token \n```\n\nNote the URL that gets printed out to the console. 
Mine is https://huggingface.co/spaces/freddyaboulton/echo-chatbot-gradio-discord-bot\n\n### Step 4: Getting a Discord Bot Token\nIf you didn't have a discord bot token for step 3, go to the URL that got printed in the console and follow the instructions there.\nOnce you obtain a token, run the command again but this time pass in the token: \n\n```bash\ngradio deploy-discord --src freddyaboulton/echo-chatbot --discord-bot-token \n```\n\n### Step 5: Add the bot to your server\nVisit the space of your discord bot. You should see \"Add this bot to your server by clicking this link:\" followed by a URL. Go to that URL and add the bot to your server!\n\n### Step 6: Use your bot!\nBy default the bot can be called by starting a message with `/chat`, e.g. `/chat `.\n\n\u26a0\ufe0f Tip \u26a0\ufe0f: If either of the deployed spaces goes to sleep, the bot will stop working. By default, spaces go to sleep after 48 hours of inactivity. You can upgrade the hardware of your space to prevent it from going to sleep. See this [guide](https://huggingface.co/docs/hub/spaces-gpus#using-gpu-spaces) for more information.\n\n\n\n\n### Using the `gradio_client.Client` Class\nYou can also create a discord bot from a deployed gradio app with python.\n\n```python\nimport gradio_client as grc\ngrc.Client(\"freddyaboulton/echo-chatbot\").deploy_discord()\n```\n\n## \ud83e\uddbe Using State of The Art LLMs \ud83e\uddbe\n\nWe have created an organization on Hugging Face called [gradio-discord-bots](https://huggingface.co/gradio-discord-bots) containing several template spaces that explain how to deploy state of the art LLMs powered by gradio as discord bots.\n\nThe easiest way to get started is by deploying Meta's Llama 2 LLM with 70 billion parameter. Simply go to this [space](https://huggingface.co/spaces/gradio-discord-bots/Llama-2-70b-chat-hf) and follow the instructions. \n\nThe deployment can be done in one line! \ud83e\udd2f\n\n```python\nimport gradio_client as grc\ngrc.Client(\"ysharma/Explore_llamav2_with_TGI\").deploy_discord(to_id=\"llama2-70b-discord-bot\")\n```\n\n## \ud83e\udd9c Additional LLMs \ud83e\udd9c\n\nIn addion to Meta's 70 billion Llama 2 model, we have prepared template spaces for the following LLMs and deployment options:\n\n* [gpt-3.5-turbo](https://huggingface.co/spaces/gradio-discord-bots/gpt-35-turbo), powered by openai. Required OpenAI key.\n* [falcon-7b-instruct](https://huggingface.co/spaces/gradio-discord-bots/falcon-7b-instruct) powered by Hugging Face Inference Endpoints.\n* [Llama-2-13b-chat-hf](https://huggingface.co/spaces/gradio-discord-bots/Llama-2-13b-chat-hf) powered by Hugging Face Inference Endpoints.\n* [Llama-2-13b-chat-hf](https://huggingface.co/spaces/gradio-discord-bots/llama-2-13b-chat-transformers) powered by Hugging Face transformers.\n\nTo deploy any of these models to discord, simply follow the instructions in the linked space for that model.\n\n## Deploying non-chat gradio apps to discord\n\nAs mentioned above, you don't need a `gr.ChatInterface` if you want to deploy your gradio app to discord. All that's needed is an api route that takes in a single string and outputs a single string. \n\nThe following code will deploy a space that translates english to german as a discord bot.\n\n```python\nimport gradio_client as grc\nclient = grc.Client(\"freddyaboulton/english-to-german\")\nclient.deploy_discord(api_names=['german'])\n```\n\n## Conclusion\n\nThat's it for this guide! We're really excited about this feature. 
Tag [@Gradio](https://twitter.com/Gradio) on twitter and show us how your discord community interacts with your discord bots. ", "html": "

\ud83d\ude80 Creating Discord Bots from Gradio Apps \ud83d\ude80

\n\n

We're excited to announce that Gradio can now automatically create a discord bot from a deployed app! \ud83e\udd16

\n\n

Discord is a popular communication platform that allows users to chat and interact with each other in real-time. By turning your Gradio app into a Discord bot, you can bring cutting edge AI to your discord server and give your community a whole new way to interact.

\n\n

\ud83d\udcbb How does it work? \ud83d\udcbb

\n\n

With gradio_client version 0.3.0, any gradio ChatInterface app on the internet can automatically be deployed as a discord bot via the deploy_discord method of the Client class.

\n\n

Technically, any gradio app that exposes an api route that takes in a single string and outputs a single string can be deployed to discord. In this guide, we will focus on gr.ChatInterface as those apps naturally lend themselves to discord's chat functionality.

\n\n

\ud83d\udee0\ufe0f Requirements \ud83d\udee0\ufe0f

\n\n

Make sure you have the latest gradio_client and gradio versions installed.

\n\n
pip install gradio_client>=0.3.0 gradio>=3.38.0\n
\n\n

Also, make sure you have a Hugging Face account and a write access token.

\n\n

\u26a0\ufe0f Tip \u26a0\ufe0f: Make sure you login to the Hugging Face Hub by running huggingface-cli login. This will let you skip passing your token in all subsequent commands in this guide.

\n\n

\ud83c\udfc3\u200d\u2640\ufe0f Quickstart \ud83c\udfc3\u200d\u2640\ufe0f

\n\n

Step 1: Implementing our chatbot

\n\n

Let's build a very simple Chatbot using ChatInterface that simply repeats the user message. Write the following code into an app.py

\n\n
import gradio as gr\n\ndef slow_echo(message, history):\n    return message\n\ndemo = gr.ChatInterface(slow_echo).queue().launch()\n
\n\n

Step 2: Deploying our App

\n\n

In order to create a discord bot for our app, it must be accessible over the internet. In this guide, we will use the gradio deploy command to deploy our chatbot to Hugging Face spaces from the command line. Run the following command.

\n\n
gradio deploy --title echo-chatbot --app-file app.py\n
\n\n

This command will ask you some questions (e.g. requested hardware, requirements), but the default values will suffice for this guide.\nNote the URL of the space that was created. Mine is https://huggingface.co/spaces/freddyaboulton/echo-chatbot

\n\n

Step 3: Creating a Discord Bot

\n\n

Turning our space into a discord bot is also a one-liner thanks to the gradio deploy-discord command. Run the following command:

\n\n
gradio deploy-discord --src freddyaboulton/echo-chatbot\n
\n\n

\u2757\ufe0f Advanced \u2757\ufe0f: If you already have a discord bot token you can pass it to the deploy-discord command. Don't worry if you don't have one yet!

\n\n
gradio deploy-discord --src freddyaboulton/echo-chatbot --discord-bot-token \n
\n\n

Note the URL that gets printed out to the console. Mine is https://huggingface.co/spaces/freddyaboulton/echo-chatbot-gradio-discord-bot

\n\n

Step 4: Getting a Discord Bot Token

\n\n

If you didn't have a discord bot token for step 3, go to the URL that got printed in the console and follow the instructions there.\nOnce you obtain a token, run the command again but this time pass in the token:

\n\n
gradio deploy-discord --src freddyaboulton/echo-chatbot --discord-bot-token \n
\n\n

Step 5: Add the bot to your server

\n\n

Visit the space of your discord bot. You should see \"Add this bot to your server by clicking this link:\" followed by a URL. Go to that URL and add the bot to your server!

\n\n

Step 6: Use your bot!

\n\n

By default the bot can be called by starting a message with /chat, e.g. /chat <your prompt here>.

\n\n

\u26a0\ufe0f Tip \u26a0\ufe0f: If either of the deployed spaces goes to sleep, the bot will stop working. By default, spaces go to sleep after 48 hours of inactivity. You can upgrade the hardware of your space to prevent it from going to sleep. See this guide for more information.

\n\n

\n\n

Using the gradio_client.Client Class

\n\n

You can also create a discord bot from a deployed gradio app with python.

\n\n
import gradio_client as grc\ngrc.Client(\"freddyaboulton/echo-chatbot\").deploy_discord()\n
\n\n

\ud83e\uddbe Using State of The Art LLMs \ud83e\uddbe

\n\n

We have created an organization on Hugging Face called gradio-discord-bots containing several template spaces that explain how to deploy state of the art LLMs powered by gradio as discord bots.

\n\n

The easiest way to get started is by deploying Meta's Llama 2 LLM with 70 billion parameters. Simply go to this space and follow the instructions.

\n\n

The deployment can be done in one line! \ud83e\udd2f

\n\n
import gradio_client as grc\ngrc.Client(\"ysharma/Explore_llamav2_with_TGI\").deploy_discord(to_id=\"llama2-70b-discord-bot\")\n
\n\n

\ud83e\udd9c Additional LLMs \ud83e\udd9c

\n\n

In addition to Meta's 70 billion parameter Llama 2 model, we have prepared template spaces for the following LLMs and deployment options:

\n\n
    \n
  • gpt-3.5-turbo, powered by openai. Requires an OpenAI key.
  • \n
  • falcon-7b-instruct powered by Hugging Face Inference Endpoints.
  • \n
  • Llama-2-13b-chat-hf powered by Hugging Face Inference Endpoints.
  • \n
  • Llama-2-13b-chat-hf powered by Hugging Face transformers.
  • \n
\n\n

To deploy any of these models to discord, simply follow the instructions in the linked space for that model.

\n\n

Deploying non-chat gradio apps to discord

\n\n

As mentioned above, you don't need a gr.ChatInterface if you want to deploy your gradio app to discord. All that's needed is an api route that takes in a single string and outputs a single string.

\n\n

The following code will deploy a space that translates English to German as a discord bot.

\n\n
import gradio_client as grc\nclient = grc.Client(\"freddyaboulton/english-to-german\")\nclient.deploy_discord(api_names=['german'])\n
\n\n

Conclusion

\n\n

That's it for this guide! We're really excited about this feature. Tag @Gradio on twitter and show us how your discord community interacts with your discord bots.

\n", "tags": ["NLP", "TEXT", "CHAT"], "spaces": [], "url": "/guides/creating-a-discord-bot-from-a-gradio-app/", "contributor": null}]}, {"category": "Integrating Other Frameworks", "guides": [{"name": "using-hugging-face-integrations", "category": "integrating-other-frameworks", "pretty_category": "Integrating Other Frameworks", "guide_index": 1, "absolute_index": 16, "pretty_name": "Using Hugging Face Integrations", "content": "# Using Hugging Face Integrations\n\n\n\n\n\n\n## Introduction\n\nThe Hugging Face Hub is a central platform that has over 190,000 [models](https://huggingface.co/models), 32,000 [datasets](https://huggingface.co/datasets) and 40,000 [demos](https://huggingface.co/spaces), also known as Spaces. Although Hugging Face is famous for its \ud83e\udd17 transformers and diffusers libraries, the Hub also supports dozens of ML libraries, such as PyTorch, TensorFlow, spaCy, and many others across a variety of domains, from computer vision to reinforcement learning.\n\nGradio has multiple features that make it extremely easy to leverage existing models and Spaces on the Hub. This guide walks through these features.\n\n## Using regular inference with `pipeline`\n\nFirst, let's build a simple interface that translates text from English to Spanish. Between the over a thousand models shared by the University of Helsinki, there is an [existing model](https://huggingface.co/Helsinki-NLP/opus-mt-en-es), `opus-mt-en-es`, that does precisely this!\n\nThe \ud83e\udd17 transformers library has a very easy-to-use abstraction, [`pipeline()`](https://huggingface.co/docs/transformers/v4.16.2/en/main_classes/pipelines#transformers.pipeline) that handles most of the complex code to offer a simple API for common tasks. By specifying the task and an (optional) model, you can use an existing model with few lines:\n\n```python\nimport gradio as gr\n\nfrom transformers import pipeline\n\npipe = pipeline(\"translation\", model=\"Helsinki-NLP/opus-mt-en-es\")\n\ndef predict(text):\n return pipe(text)[0][\"translation_text\"]\n \ndemo = gr.Interface(\n fn=predict, \n inputs='text',\n outputs='text',\n)\n\ndemo.launch()\n```\n\nBut `gradio` actually makes it even easier to convert a `pipeline` to a demo, simply by using the `gradio.Interface.from_pipeline` methods, which skips the need to specify the input and output components:\n\n```python\nfrom transformers import pipeline\nimport gradio as gr\n\npipe = pipeline(\"translation\", model=\"Helsinki-NLP/opus-mt-en-es\")\n\ndemo = gr.Interface.from_pipeline(pipe)\ndemo.launch()\n```\n\nThe previous code produces the following interface, which you can try right here in your browser: \n\n\n\n\n\n## Using Hugging Face Inference API\n\nHugging Face has a free service called the [Inference API](https://huggingface.co/inference-api), which allows you to send HTTP requests to models in the Hub. For transformers or diffusers-based models, the API can be 2 to 10 times faster than running the inference yourself. The API is free (rate limited), and you can switch to dedicated [Inference Endpoints](https://huggingface.co/pricing) when you want to use it in production.\n\nLet's try the same demo as above but using the Inference API instead of loading the model yourself. Given a Hugging Face model supported in the Inference API, Gradio can automatically infer the expected input and output and make the underlying server calls, so you don't have to worry about defining the prediction function. 
Here is what the code would look like!\n\n```python\nimport gradio as gr\n\ndemo = gr.load(\"Helsinki-NLP/opus-mt-en-es\", src=\"models\")\n\ndemo.launch()\n```\n\nNotice that we just put specify the model name and state that the `src` should be `models` (Hugging Face's Model Hub). There is no need to install any dependencies (except `gradio`) since you are not loading the model on your computer.\n\nYou might notice that the first inference takes about 20 seconds. This happens since the Inference API is loading the model in the server. You get some benefits afterward:\n\n* The inference will be much faster.\n* The server caches your requests.\n* You get built-in automatic scaling.\n\n## Hosting your Gradio demos\n\n[Hugging Face Spaces](https://hf.co/spaces) allows anyone to host their Gradio demos freely, and uploading your Gradio demos take a couple of minutes. You can head to [hf.co/new-space](https://huggingface.co/new-space), select the Gradio SDK, create an `app.py` file, and voila! You have a demo you can share with anyone else. To learn more, read [this guide how to host on Hugging Face Spaces using the website](https://huggingface.co/blog/gradio-spaces).\n\n\nAlternatively, you can create a Space programmatically, making use of the [huggingface_hub client library](https://huggingface.co/docs/huggingface_hub/index) library. Here's an example:\n\n```python\nfrom huggingface_hub import (\n create_repo,\n get_full_repo_name,\n upload_file,\n)\ncreate_repo(name=target_space_name, token=hf_token, repo_type=\"space\", space_sdk=\"gradio\")\nrepo_name = get_full_repo_name(model_id=target_space_name, token=hf_token)\nfile_url = upload_file(\n path_or_fileobj=\"file.txt\",\n path_in_repo=\"app.py\",\n repo_id=repo_name,\n repo_type=\"space\",\n token=hf_token,\n)\n```\nHere, `create_repo` creates a gradio repo with the target name under a specific account using that account's Write Token. `repo_name` gets the full repo name of the related repo. Finally `upload_file` uploads a file inside the repo with the name `app.py`.\n\n\n\n## Embedding your Space demo on other websites\n\nThroughout this guide, you've seen many embedded Gradio demos. You can also do this on own website! The first step is to create a Hugging Face Space with the demo you want to showcase. Then, [follow the steps here to embed the Space on your website](/guides/sharing-your-app/#embedding-hosted-spaces).\n\n\n## Loading demos from Spaces\n\nYou can also use and remix existing Gradio demos on Hugging Face Spaces. For example, you could take two existing Gradio demos and put them as separate tabs and create a new demo. You can run this new demo locally, or upload it to Spaces, allowing endless possibilities to remix and create new demos!\n\nHere's an example that does exactly that:\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n with gr.Tab(\"Translate to Spanish\"):\n gr.load(\"gradio/helsinki_translation_en_es\", src=\"spaces\")\n with gr.Tab(\"Translate to French\"):\n gr.load(\"abidlabs/en2fr\", src=\"spaces\")\n\ndemo.launch()\n```\n\nNotice that we use `gr.load()`, the same method we used to load models using the Inference API. However, here we specify that the `src` is `spaces` (Hugging Face Spaces).\n\n## Recap\n\nThat's it! Let's recap the various ways Gradio and Hugging Face work together:\n\n1. You can convert a `transformers` pipeline into a Gradio demo using `from_pipeline()`\n2. You can build a demo around the Inference API without having to load the model easily using `gr.load()`\n3. 
You host your Gradio demo on Hugging Face Spaces, either using the GUI or entirely in Python.\n4. You can embed Gradio demos that are hosted on Hugging Face Spaces onto your own website.\n5. You can load demos from Hugging Face Spaces to remix and create new Gradio demos using `gr.load()`.\n\n\n\ud83e\udd17\n", "html": "

Using Hugging Face Integrations

\n\n

Introduction

\n\n

The Hugging Face Hub is a central platform that has over 190,000 models, 32,000 datasets and 40,000 demos, also known as Spaces. Although Hugging Face is famous for its \ud83e\udd17 transformers and diffusers libraries, the Hub also supports dozens of ML libraries, such as PyTorch, TensorFlow, spaCy, and many others across a variety of domains, from computer vision to reinforcement learning.

\n\n

Gradio has multiple features that make it extremely easy to leverage existing models and Spaces on the Hub. This guide walks through these features.

\n\n

Using regular inference with pipeline

\n\n

First, let's build a simple interface that translates text from English to Spanish. Among the more than a thousand models shared by the University of Helsinki, there is an existing model, opus-mt-en-es, that does precisely this!

\n\n

The \ud83e\udd17 transformers library has a very easy-to-use abstraction, pipeline(), that handles most of the complex code to offer a simple API for common tasks. By specifying the task and an (optional) model, you can use an existing model with a few lines of code:

\n\n
import gradio as gr\n\nfrom transformers import pipeline\n\npipe = pipeline(\"translation\", model=\"Helsinki-NLP/opus-mt-en-es\")\n\ndef predict(text):\n  return pipe(text)[0][\"translation_text\"]\n\ndemo = gr.Interface(\n  fn=predict, \n  inputs='text',\n  outputs='text',\n)\n\ndemo.launch()\n
\n\n

But gradio actually makes it even easier to convert a pipeline to a demo, simply by using the gradio.Interface.from_pipeline method, which skips the need to specify the input and output components:

\n\n
from transformers import pipeline\nimport gradio as gr\n\npipe = pipeline(\"translation\", model=\"Helsinki-NLP/opus-mt-en-es\")\n\ndemo = gr.Interface.from_pipeline(pipe)\ndemo.launch()\n
\n\n

The previous code produces the following interface, which you can try right here in your browser:

\n\n

\n\n

Using Hugging Face Inference API

\n\n

Hugging Face has a free service called the Inference API, which allows you to send HTTP requests to models in the Hub. For transformers or diffusers-based models, the API can be 2 to 10 times faster than running the inference yourself. The API is free (rate limited), and you can switch to dedicated Inference Endpoints when you want to use it in production.

\n\n

Let's try the same demo as above but using the Inference API instead of loading the model yourself. Given a Hugging Face model supported in the Inference API, Gradio can automatically infer the expected input and output and make the underlying server calls, so you don't have to worry about defining the prediction function. Here is what the code would look like!

\n\n
import gradio as gr\n\ndemo = gr.load(\"Helsinki-NLP/opus-mt-en-es\", src=\"models\")\n\ndemo.launch()\n
\n\n

Notice that we just specify the model name and state that the src should be models (Hugging Face's Model Hub). There is no need to install any dependencies (except gradio) since you are not loading the model on your computer.

\n\n

You might notice that the first inference takes about 20 seconds. This happens since the Inference API is loading the model in the server. You get some benefits afterward:

\n\n
    \n
  • The inference will be much faster.
  • \n
  • The server caches your requests.
  • \n
  • You get built-in automatic scaling.
  • \n
\n\n

Hosting your Gradio demos

\n\n

Hugging Face Spaces allows anyone to host their Gradio demos freely, and uploading your Gradio demos takes only a couple of minutes. You can head to hf.co/new-space, select the Gradio SDK, create an app.py file, and voila! You have a demo you can share with anyone else. To learn more, read this guide on how to host on Hugging Face Spaces using the website.

\n\n

Alternatively, you can create a Space programmatically, making use of the huggingface_hub client library. Here's an example:

\n\n
from huggingface_hub import (\n    create_repo,\n    get_full_repo_name,\n    upload_file,\n)\ncreate_repo(name=target_space_name, token=hf_token, repo_type=\"space\", space_sdk=\"gradio\")\nrepo_name = get_full_repo_name(model_id=target_space_name, token=hf_token)\nfile_url = upload_file(\n    path_or_fileobj=\"file.txt\",\n    path_in_repo=\"app.py\",\n    repo_id=repo_name,\n    repo_type=\"space\",\n    token=hf_token,\n)\n
\n\n

Here, create_repo creates a gradio repo with the target name under a specific account using that account's Write Token. repo_name gets the full repo name of the related repo. Finally upload_file uploads a file inside the repo with the name app.py.

\n\n

Embedding your Space demo on other websites

\n\n

Throughout this guide, you've seen many embedded Gradio demos. You can also do this on your own website! The first step is to create a Hugging Face Space with the demo you want to showcase. Then, follow the steps here to embed the Space on your website.

\n\n

Loading demos from Spaces

\n\n

You can also use and remix existing Gradio demos on Hugging Face Spaces. For example, you could take two existing Gradio demos and put them as separate tabs and create a new demo. You can run this new demo locally, or upload it to Spaces, allowing endless possibilities to remix and create new demos!

\n\n

Here's an example that does exactly that:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n  with gr.Tab(\"Translate to Spanish\"):\n    gr.load(\"gradio/helsinki_translation_en_es\", src=\"spaces\")\n  with gr.Tab(\"Translate to French\"):\n    gr.load(\"abidlabs/en2fr\", src=\"spaces\")\n\ndemo.launch()\n
\n\n

Notice that we use gr.load(), the same method we used to load models using the Inference API. However, here we specify that the src is spaces (Hugging Face Spaces).

\n\n

Recap

\n\n

That's it! Let's recap the various ways Gradio and Hugging Face work together:

\n\n
    \n
  1. You can convert a transformers pipeline into a Gradio demo using from_pipeline()
  2. \n
  3. You can easily build a demo around the Inference API, without having to load the model, using gr.load()
  4. \n
  5. You can host your Gradio demo on Hugging Face Spaces, either using the GUI or entirely in Python.
  6. \n
  7. You can embed Gradio demos that are hosted on Hugging Face Spaces onto your own website.
  8. \n
  9. You can load demos from Hugging Face Spaces to remix and create new Gradio demos using gr.load().
  10. \n
\n\n

\ud83e\udd17

\n", "tags": ["HUB", "SPACES", "EMBED"], "spaces": ["https://huggingface.co/spaces/gradio/helsinki_translation_en_es"], "url": "/guides/using-hugging-face-integrations/", "contributor": "Omar Sanseviero \ud83e\udd99 "}, {"name": "Gradio-and-Comet", "category": "integrating-other-frameworks", "pretty_category": "Integrating Other Frameworks", "guide_index": null, "absolute_index": 17, "pretty_name": "Gradio And Comet", "content": "# Using Gradio and Comet\n\n\n\n\n## Introduction\n\nIn this guide we will demonstrate some of the ways you can use Gradio with Comet. We will cover the basics of using Comet with Gradio and show you some of the ways that you can leverage Gradio's advanced features such as [Embedding with iFrames](https://www.gradio.app/guides/sharing-your-app/#embedding-with-iframes) and [State](https://www.gradio.app/docs/#state) to build some amazing model evaluation workflows.\n\nHere is a list of the topics covered in this guide.\n\n1. Logging Gradio UI's to your Comet Experiments\n2. Embedding Gradio Applications directly into your Comet Projects\n3. Embedding Hugging Face Spaces directly into your Comet Projects\n4. Logging Model Inferences from your Gradio Application to Comet\n\n\n## What is Comet?\n\n[Comet](https://www.comet.com?utm_source=gradio&utm_medium=referral&utm_campaign=gradio-integration&utm_content=gradio-docs) is an MLOps Platform that is designed to help Data Scientists and Teams build better models faster! Comet provides tooling to Track, Explain, Manage, and Monitor your models in a single place! It works with Jupyter Notebooks and Scripts and most importantly it's 100% free!\n\n\n## Setup\n\nFirst, install the dependencies needed to run these examples\n\n```shell\npip install comet_ml torch torchvision transformers gradio shap requests Pillow\n```\n\nNext, you will need to [sign up for a Comet Account](https://www.comet.com/signup?utm_source=gradio&utm_medium=referral&utm_campaign=gradio-integration&utm_content=gradio-docs). Once you have your account set up, [grab your API Key](https://www.comet.com/docs/v2/guides/getting-started/quickstart/#get-an-api-key?utm_source=gradio&utm_medium=referral&utm_campaign=gradio-integration&utm_content=gradio-docs) and configure your Comet credentials\n\nIf you're running these examples as a script, you can either export your credentials as environment variables\n\n```shell\nexport COMET_API_KEY=\"\"\nexport COMET_WORKSPACE=\"\"\nexport COMET_PROJECT_NAME=\"\"\n```\n\nor set them in a `.comet.config` file in your working directory. You file should be formatted in the following way.\n\n```shell\n[comet]\napi_key=\nworkspace=\nproject_name=\n```\n\nIf you are using the provided Colab Notebooks to run these examples, please run the cell with the following snippet before starting the Gradio UI. Running this cell allows you to interactively add your API key to the notebook.\n\n```python\nimport comet_ml\ncomet_ml.init()\n```\n\n## 1. 
Logging Gradio UI's to your Comet Experiments\n\n[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/comet-ml/comet-examples/blob/master/integrations/model-evaluation/gradio/notebooks/Gradio_and_Comet.ipynb)\n\nIn this example, we will go over how to log your Gradio Applications to Comet and interact with them using the Gradio Custom Panel.\n\nLet's start by building a simple Image Classification example using `resnet18`.\n\n```python\nimport comet_ml\n\nimport requests\nimport torch\nfrom PIL import Image\nfrom torchvision import transforms\n\ntorch.hub.download_url_to_file(\"https://github.com/pytorch/hub/raw/master/images/dog.jpg\", \"dog.jpg\")\n\nif torch.cuda.is_available():\n device = \"cuda\"\nelse:\n device = \"cpu\"\n\nmodel = torch.hub.load(\"pytorch/vision:v0.6.0\", \"resnet18\", pretrained=True).eval()\nmodel = model.to(device)\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\n\ndef predict(inp):\n inp = Image.fromarray(inp.astype(\"uint8\"), \"RGB\")\n inp = transforms.ToTensor()(inp).unsqueeze(0)\n with torch.no_grad():\n prediction = torch.nn.functional.softmax(model(inp.to(device))[0], dim=0)\n return {labels[i]: float(prediction[i]) for i in range(1000)}\n\n\ninputs = gr.Image()\noutputs = gr.Label(num_top_classes=3)\n\nio = gr.Interface(\n fn=predict, inputs=inputs, outputs=outputs, examples=[\"dog.jpg\"]\n)\nio.launch(inline=False, share=True)\n\nexperiment = comet_ml.Experiment()\nexperiment.add_tag(\"image-classifier\")\n\nio.integrate(comet_ml=experiment)\n```\n\nThe last line in this snippet will log the URL of the Gradio Application to your Comet Experiment. You can find the URL in the Text Tab of your Experiment.\n\n\n\nAdd the Gradio Panel to your Experiment to interact with your application.\n\n\n\n\n## 2. Embedding Gradio Applications directly into your Comet Projects\n\n\n\nIf you are permanently hosting your Gradio application, you can embed the UI using the Gradio Panel Extended custom Panel.\n\nGo to your Comet Project page, and head over to the Panels tab. Click the `+ Add` button to bring up the Panels search page.\n\n\"adding-panels\"\n\nNext, search for Gradio Panel Extended in the Public Panels section and click `Add`.\n\n\"gradio-panel-extended\"\n\nOnce you have added your Panel, click `Edit` to access to the Panel Options page and paste in the URL of your Gradio application.\n\n![Edit-Gradio-Panel-Options](https://user-images.githubusercontent.com/7529846/214573001-23814b5a-ca65-4ace-a8a5-b27cdda70f7a.gif)\n\n\"Edit-Gradio-Panel-URL\"\n\n\n## 3. Embedding Hugging Face Spaces directly into your Comet Projects\n\n\n\nYou can also embed Gradio Applications that are hosted on Hugging Faces Spaces into your Comet Projects using the Hugging Face Spaces Panel.\n\nGo to your Comet Project page, and head over to the Panels tab. Click the `+ Add` button to bring up the Panels search page. Next, search for the Hugging Face Spaces Panel in the Public Panels section and click `Add`.\n\n\"huggingface-spaces-panel\"\n\nOnce you have added your Panel, click Edit to access to the Panel Options page and paste in the path of your Hugging Face Space e.g. `pytorch/ResNet`\n\n\"Edit-HF-Space\"\n\n## 4. 
Logging Model Inferences to Comet\n\n\n\n[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/comet-ml/comet-examples/blob/master/integrations/model-evaluation/gradio/notebooks/Logging_Model_Inferences_with_Comet_and_Gradio.ipynb)\n\n\nIn the previous examples, we demonstrated the various ways in which you can interact with a Gradio application through the Comet UI. Additionally, you can also log model inferences, such as SHAP plots, from your Gradio application to Comet.\n\nIn the following snippet, we're going to log inferences from a Text Generation model. We can persist an Experiment across multiple inference calls using Gradio's [State](https://www.gradio.app/docs/#state) object. This will allow you to log multiple inferences from a model to a single Experiment.\n\n```python\nimport comet_ml\nimport gradio as gr\nimport shap\nimport torch\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\n\nif torch.cuda.is_available():\n device = \"cuda\"\nelse:\n device = \"cpu\"\n\nMODEL_NAME = \"gpt2\"\n\nmodel = AutoModelForCausalLM.from_pretrained(MODEL_NAME)\n\n# set model decoder to true\nmodel.config.is_decoder = True\n# set text-generation params under task_specific_params\nmodel.config.task_specific_params[\"text-generation\"] = {\n \"do_sample\": True,\n \"max_length\": 50,\n \"temperature\": 0.7,\n \"top_k\": 50,\n \"no_repeat_ngram_size\": 2,\n}\nmodel = model.to(device)\n\ntokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)\nexplainer = shap.Explainer(model, tokenizer)\n\n\ndef start_experiment():\n \"\"\"Returns an APIExperiment object that is thread safe\n and can be used to log inferences to a single Experiment\n \"\"\"\n try:\n api = comet_ml.API()\n workspace = api.get_default_workspace()\n project_name = comet_ml.config.get_config()[\"comet.project_name\"]\n\n experiment = comet_ml.APIExperiment(\n workspace=workspace, project_name=project_name\n )\n experiment.log_other(\"Created from\", \"gradio-inference\")\n\n message = f\"Started Experiment: [{experiment.name}]({experiment.url})\"\n\n return (experiment, message)\n\n except Exception as e:\n return None, None\n\n\ndef predict(text, state, message):\n experiment = state\n\n shap_values = explainer([text])\n plot = shap.plots.text(shap_values, display=False)\n\n if experiment is not None:\n experiment.log_other(\"message\", message)\n experiment.log_html(plot)\n\n return plot\n\n\nwith gr.Blocks() as demo:\n start_experiment_btn = gr.Button(\"Start New Experiment\")\n experiment_status = gr.Markdown()\n\n # Log a message to the Experiment to provide more context\n experiment_message = gr.Textbox(label=\"Experiment Message\")\n experiment = gr.State()\n\n input_text = gr.Textbox(label=\"Input Text\", lines=5, interactive=True)\n submit_btn = gr.Button(\"Submit\")\n\n output = gr.HTML(interactive=True)\n\n start_experiment_btn.click(\n start_experiment, outputs=[experiment, experiment_status]\n )\n submit_btn.click(\n predict, inputs=[input_text, experiment, experiment_message], outputs=[output]\n )\n```\n\nInferences from this snippet will be saved in the HTML tab of your experiment.\n\n\n\n## Conclusion\n\nWe hope you found this guide useful and that it provides some inspiration to help you build awesome model evaluation workflows with Comet and Gradio.\n\n## How to contribute Gradio demos on HF spaces on the Comet organization\n\n* Create an account on Hugging Face [here](https://huggingface.co/join).\n* Add Gradio Demo under your username, see this 
[course](https://huggingface.co/course/chapter9/4?fw=pt) for setting up Gradio Demo on Hugging Face.\n* Request to join the Comet organization [here](https://huggingface.co/Comet).\n\n## Additional Resources\n\n* [Comet Documentation](https://www.comet.com/docs/v2/?utm_source=gradio&utm_medium=referral&utm_campaign=gradio-integration&utm_content=gradio-docs)\n", "html": "

Using Gradio and Comet

\n\n

Introduction

\n\n

In this guide we will demonstrate some of the ways you can use Gradio with Comet. We will cover the basics of using Comet with Gradio and show you some of the ways that you can leverage Gradio's advanced features such as Embedding with iFrames and State to build some amazing model evaluation workflows.

\n\n

Here is a list of the topics covered in this guide.

\n\n
    \n
  1. Logging Gradio UI's to your Comet Experiments
  2. \n
  3. Embedding Gradio Applications directly into your Comet Projects
  4. \n
  5. Embedding Hugging Face Spaces directly into your Comet Projects
  6. \n
  7. Logging Model Inferences from your Gradio Application to Comet
  8. \n
\n\n

What is Comet?

\n\n

Comet is an MLOps Platform that is designed to help Data Scientists and Teams build better models faster! Comet provides tooling to Track, Explain, Manage, and Monitor your models in a single place! It works with Jupyter Notebooks and Scripts and most importantly it's 100% free!

\n\n

Setup

\n\n

First, install the dependencies needed to run these examples

\n\n
pip install comet_ml torch torchvision transformers gradio shap requests Pillow\n
\n\n

Next, you will need to sign up for a Comet Account. Once you have your account set up, grab your API Key and configure your Comet credentials

\n\n

If you're running these examples as a script, you can either export your credentials as environment variables

\n\n
export COMET_API_KEY=\"\"\nexport COMET_WORKSPACE=\"\"\nexport COMET_PROJECT_NAME=\"\"\n
\n\n

or set them in a .comet.config file in your working directory. Your file should be formatted in the following way.

\n\n
[comet]\napi_key=\nworkspace=\nproject_name=\n
\n\n

If you are using the provided Colab Notebooks to run these examples, please run the cell with the following snippet before starting the Gradio UI. Running this cell allows you to interactively add your API key to the notebook.

\n\n
import comet_ml\ncomet_ml.init()\n
\n\n

1. Logging Gradio UI's to your Comet Experiments

\n\n

\"Open

\n\n

In this example, we will go over how to log your Gradio Applications to Comet and interact with them using the Gradio Custom Panel.

\n\n

Let's start by building a simple Image Classification example using resnet18.

\n\n
import comet_ml\n\nimport requests\nimport torch\nfrom PIL import Image\nfrom torchvision import transforms\n\ntorch.hub.download_url_to_file(\"https://github.com/pytorch/hub/raw/master/images/dog.jpg\", \"dog.jpg\")\n\nif torch.cuda.is_available():\n    device = \"cuda\"\nelse:\n    device = \"cpu\"\n\nmodel = torch.hub.load(\"pytorch/vision:v0.6.0\", \"resnet18\", pretrained=True).eval()\nmodel = model.to(device)\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\n\ndef predict(inp):\n    inp = Image.fromarray(inp.astype(\"uint8\"), \"RGB\")\n    inp = transforms.ToTensor()(inp).unsqueeze(0)\n    with torch.no_grad():\n        prediction = torch.nn.functional.softmax(model(inp.to(device))[0], dim=0)\n    return {labels[i]: float(prediction[i]) for i in range(1000)}\n\n\ninputs = gr.Image()\noutputs = gr.Label(num_top_classes=3)\n\nio = gr.Interface(\n    fn=predict, inputs=inputs, outputs=outputs, examples=[\"dog.jpg\"]\n)\nio.launch(inline=False, share=True)\n\nexperiment = comet_ml.Experiment()\nexperiment.add_tag(\"image-classifier\")\n\nio.integrate(comet_ml=experiment)\n
\n\n

The last line in this snippet will log the URL of the Gradio Application to your Comet Experiment. You can find the URL in the Text Tab of your Experiment.

\n\n

\n\n

Add the Gradio Panel to your Experiment to interact with your application.

\n\n

\n\n

2. Embedding Gradio Applications directly into your Comet Projects

\n\n\n\n

If you are permanently hosting your Gradio application, you can embed the UI using the Gradio Panel Extended custom Panel.

\n\n

Go to your Comet Project page, and head over to the Panels tab. Click the + Add button to bring up the Panels search page.

\n\n

\"adding-panels\"

\n\n

Next, search for Gradio Panel Extended in the Public Panels section and click Add.

\n\n

\"gradio-panel-extended\"

\n\n

Once you have added your Panel, click Edit to access the Panel Options page and paste in the URL of your Gradio application.

\n\n

\"Edit-Gradio-Panel-Options\"

\n\n

\"Edit-Gradio-Panel-URL\"

\n\n

3. Embedding Hugging Face Spaces directly into your Comet Projects

\n\n\n\n

You can also embed Gradio Applications that are hosted on Hugging Face Spaces into your Comet Projects using the Hugging Face Spaces Panel.

\n\n

Go to your Comet Project page, and head over to the Panels tab. Click the + Add button to bring up the Panels search page. Next, search for the Hugging Face Spaces Panel in the Public Panels section and click Add.

\n\n

\"huggingface-spaces-panel\"

\n\n

Once you have added your Panel, click Edit to access the Panel Options page and paste in the path of your Hugging Face Space, e.g. pytorch/ResNet

\n\n

\"Edit-HF-Space\"

\n\n

4. Logging Model Inferences to Comet

\n\n\n\n

\"Open

\n\n

In the previous examples, we demonstrated the various ways in which you can interact with a Gradio application through the Comet UI. Additionally, you can also log model inferences, such as SHAP plots, from your Gradio application to Comet.

\n\n

In the following snippet, we're going to log inferences from a Text Generation model. We can persist an Experiment across multiple inference calls using Gradio's State object. This will allow you to log multiple inferences from a model to a single Experiment.

\n\n
import comet_ml\nimport gradio as gr\nimport shap\nimport torch\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\n\nif torch.cuda.is_available():\n    device = \"cuda\"\nelse:\n    device = \"cpu\"\n\nMODEL_NAME = \"gpt2\"\n\nmodel = AutoModelForCausalLM.from_pretrained(MODEL_NAME)\n\n# set model decoder to true\nmodel.config.is_decoder = True\n# set text-generation params under task_specific_params\nmodel.config.task_specific_params[\"text-generation\"] = {\n    \"do_sample\": True,\n    \"max_length\": 50,\n    \"temperature\": 0.7,\n    \"top_k\": 50,\n    \"no_repeat_ngram_size\": 2,\n}\nmodel = model.to(device)\n\ntokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)\nexplainer = shap.Explainer(model, tokenizer)\n\n\ndef start_experiment():\n    \"\"\"Returns an APIExperiment object that is thread safe\n    and can be used to log inferences to a single Experiment\n    \"\"\"\n    try:\n        api = comet_ml.API()\n        workspace = api.get_default_workspace()\n        project_name = comet_ml.config.get_config()[\"comet.project_name\"]\n\n        experiment = comet_ml.APIExperiment(\n            workspace=workspace, project_name=project_name\n        )\n        experiment.log_other(\"Created from\", \"gradio-inference\")\n\n        message = f\"Started Experiment: [{experiment.name}]({experiment.url})\"\n\n        return (experiment, message)\n\n    except Exception as e:\n        return None, None\n\n\ndef predict(text, state, message):\n    experiment = state\n\n    shap_values = explainer([text])\n    plot = shap.plots.text(shap_values, display=False)\n\n    if experiment is not None:\n        experiment.log_other(\"message\", message)\n        experiment.log_html(plot)\n\n    return plot\n\n\nwith gr.Blocks() as demo:\n    start_experiment_btn = gr.Button(\"Start New Experiment\")\n    experiment_status = gr.Markdown()\n\n    # Log a message to the Experiment to provide more context\n    experiment_message = gr.Textbox(label=\"Experiment Message\")\n    experiment = gr.State()\n\n    input_text = gr.Textbox(label=\"Input Text\", lines=5, interactive=True)\n    submit_btn = gr.Button(\"Submit\")\n\n    output = gr.HTML(interactive=True)\n\n    start_experiment_btn.click(\n        start_experiment, outputs=[experiment, experiment_status]\n    )\n    submit_btn.click(\n        predict, inputs=[input_text, experiment, experiment_message], outputs=[output]\n    )\n
\n\n

Inferences from this snippet will be saved in the HTML tab of your experiment.

\n\n

\n\n

Conclusion

\n\n

We hope you found this guide useful and that it provides some inspiration to help you build awesome model evaluation workflows with Comet and Gradio.

\n\n

How to contribute Gradio demos on HF spaces on the Comet organization

\n\n
    \n
  • Create an account on Hugging Face here.
  • \n
  • Add Gradio Demo under your username, see this course for setting up Gradio Demo on Hugging Face.
  • \n
  • Request to join the Comet organization here.
  • \n
\n\n

Additional Resources

\n\n\n", "tags": ["COMET", "SPACES"], "spaces": [], "url": "/guides/Gradio-and-Comet/", "contributor": "the Comet team"}, {"name": "Gradio-and-ONNX-on-Hugging-Face", "category": "integrating-other-frameworks", "pretty_category": "Integrating Other Frameworks", "guide_index": null, "absolute_index": 18, "pretty_name": "Gradio And ONNX On Hugging Face", "content": "# Gradio and ONNX on Hugging Face\n\n\n\n\n\n## Introduction\n\nIn this Guide, we'll walk you through:\n\n* Introduction of ONNX, ONNX model zoo, Gradio, and Hugging Face Spaces\n* How to setup a Gradio demo for EfficientNet-Lite4\n* How to contribute your own Gradio demos for the ONNX organization on Hugging Face\n\nHere's an example of an ONNX model: try out the EfficientNet-Lite4 demo below.\n\n\n\n## What is the ONNX Model Zoo?\nOpen Neural Network Exchange ([ONNX](https://onnx.ai/)) is an open standard format for representing machine learning models. ONNX is supported by a community of partners who have implemented it in many frameworks and tools. For example, if you have trained a model in TensorFlow or PyTorch, you can convert it to ONNX easily, and from there run it on a variety of devices using an engine/compiler like ONNX Runtime.\n\nThe [ONNX Model Zoo](https://github.com/onnx/models) is a collection of pre-trained, state-of-the-art models in the ONNX format contributed by community members. Accompanying each model are Jupyter notebooks for model training and running inference with the trained model. The notebooks are written in Python and include links to the training dataset as well as references to the original paper that describes the model architecture.\n\n\n## What are Hugging Face Spaces & Gradio?\n\n### Gradio\n\nGradio lets users demo their machine learning models as a web app all in python code. Gradio wraps a python function into a user interface and the demos can be launched inside jupyter notebooks, colab notebooks, as well as embedded in your own website and hosted on Hugging Face Spaces for free.\n\nGet started [here](https://gradio.app/getting_started)\n\n### Hugging Face Spaces\n\nHugging Face Spaces is a free hosting option for Gradio demos. Spaces comes with 3 SDK options: Gradio, Streamlit and Static HTML demos. Spaces can be public or private and the workflow is similar to github repos. There are over 2000+ spaces currently on Hugging Face. Learn more about spaces [here](https://huggingface.co/spaces/launch).\n\n### Hugging Face Models\n\nHugging Face Model Hub also supports ONNX models and ONNX models can be filtered through the [ONNX tag](https://huggingface.co/models?library=onnx&sort=downloads)\n\n## How did Hugging Face help the ONNX Model Zoo?\nThere are a lot of Jupyter notebooks in the ONNX Model Zoo for users to test models. Previously, users needed to download the models themselves and run those notebooks locally for testing. With Hugging Face, the testing process can be much simpler and more user-friendly. Users can easily try certain ONNX Model Zoo model on Hugging Face Spaces and run a quick demo powered by Gradio with ONNX Runtime, all on cloud without downloading anything locally. Note, there are various runtimes for ONNX, e.g., [ONNX Runtime](https://github.com/microsoft/onnxruntime), [MXNet](https://github.com/apache/incubator-mxnet).\n\n## What is the role of ONNX Runtime?\nONNX Runtime is a cross-platform inference and training machine-learning accelerator. 
It makes live Gradio demos with ONNX Model Zoo model on Hugging Face possible.\n\nONNX Runtime inference can enable faster customer experiences and lower costs, supporting models from deep learning frameworks such as PyTorch and TensorFlow/Keras as well as classical machine learning libraries such as scikit-learn, LightGBM, XGBoost, etc. ONNX Runtime is compatible with different hardware, drivers, and operating systems, and provides optimal performance by leveraging hardware accelerators where applicable alongside graph optimizations and transforms. For more information please see the [official website](https://onnxruntime.ai/).\n\n## Setting up a Gradio Demo for EfficientNet-Lite4\n\nEfficientNet-Lite 4 is the largest variant and most accurate of the set of EfficientNet-Lite models. It is an integer-only quantized model that produces the highest accuracy of all of the EfficientNet models. It achieves 80.4% ImageNet top-1 accuracy, while still running in real-time (e.g. 30ms/image) on a Pixel 4 CPU. To learn more read the [model card](https://github.com/onnx/models/tree/main/vision/classification/efficientnet-lite4)\n\nHere we walk through setting up a example demo for EfficientNet-Lite4 using Gradio\n\nFirst we import our dependencies and download and load the efficientnet-lite4 model from the onnx model zoo. Then load the labels from the labels_map.txt file. We then setup our preprocessing functions, load the model for inference, and setup the inference function. Finally, the inference function is wrapped into a gradio interface for a user to interact with. See the full code below.\n\n\n```python\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nimport cv2\nimport json\nimport gradio as gr\nfrom huggingface_hub import hf_hub_download\nfrom onnx import hub\nimport onnxruntime as ort\n\n# loads ONNX model from ONNX Model Zoo\nmodel = hub.load(\"efficientnet-lite4\")\n# loads the labels text file\nlabels = json.load(open(\"labels_map.txt\", \"r\"))\n\n# sets image file dimensions to 224x224 by resizing and cropping image from center\ndef pre_process_edgetpu(img, dims):\n output_height, output_width, _ = dims\n img = resize_with_aspectratio(img, output_height, output_width, inter_pol=cv2.INTER_LINEAR)\n img = center_crop(img, output_height, output_width)\n img = np.asarray(img, dtype='float32')\n # converts jpg pixel value from [0 - 255] to float array [-1.0 - 1.0]\n img -= [127.0, 127.0, 127.0]\n img /= [128.0, 128.0, 128.0]\n return img\n\n# resizes the image with a proportional scale\ndef resize_with_aspectratio(img, out_height, out_width, scale=87.5, inter_pol=cv2.INTER_LINEAR):\n height, width, _ = img.shape\n new_height = int(100. * out_height / scale)\n new_width = int(100. 
* out_width / scale)\n if height > width:\n w = new_width\n h = int(new_height * height / width)\n else:\n h = new_height\n w = int(new_width * width / height)\n img = cv2.resize(img, (w, h), interpolation=inter_pol)\n return img\n\n# crops the image around the center based on given height and width\ndef center_crop(img, out_height, out_width):\n height, width, _ = img.shape\n left = int((width - out_width) / 2)\n right = int((width + out_width) / 2)\n top = int((height - out_height) / 2)\n bottom = int((height + out_height) / 2)\n img = img[top:bottom, left:right]\n return img\n\n\nsess = ort.InferenceSession(model)\n\ndef inference(img):\n img = cv2.imread(img)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n \n img = pre_process_edgetpu(img, (224, 224, 3))\n \n img_batch = np.expand_dims(img, axis=0)\n\n results = sess.run([\"Softmax:0\"], {\"images:0\": img_batch})[0]\n result = reversed(results[0].argsort()[-5:])\n resultdic = {}\n for r in result:\n resultdic[labels[str(r)]] = float(results[0][r])\n return resultdic\n \ntitle = \"EfficientNet-Lite4\"\ndescription = \"EfficientNet-Lite 4 is the largest variant and most accurate of the set of EfficientNet-Lite model. It is an integer-only quantized model that produces the highest accuracy of all of the EfficientNet models. It achieves 80.4% ImageNet top-1 accuracy, while still running in real-time (e.g. 30ms/image) on a Pixel 4 CPU.\"\nexamples = [['catonnx.jpg']]\ngr.Interface(inference, gr.Image(type=\"filepath\"), \"label\", title=title, description=description, examples=examples).launch()\n```\n\n\n## How to contribute Gradio demos on HF spaces using ONNX models\n\n* Add model to the [onnx model zoo](https://github.com/onnx/models/blob/main/.github/PULL_REQUEST_TEMPLATE.md)\n* Create an account on Hugging Face [here](https://huggingface.co/join).\n* See list of models left to add to ONNX organization, please refer to the table with the [Models list](https://github.com/onnx/models#models)\n* Add Gradio Demo under your username, see this [blog post](https://huggingface.co/blog/gradio-spaces) for setting up Gradio Demo on Hugging Face. \n* Request to join ONNX Organization [here](https://huggingface.co/onnx).\n* Once approved transfer model from your username to ONNX organization\n* Add a badge for model in model table, see examples in [Models list](https://github.com/onnx/models#models)\n", "html": "

Gradio and ONNX on Hugging Face

\n\n

Introduction

\n\n

In this Guide, we'll walk you through:

\n\n
    \n
  • Introduction of ONNX, ONNX model zoo, Gradio, and Hugging Face Spaces
  • \n
  • How to set up a Gradio demo for EfficientNet-Lite4
  • \n
  • How to contribute your own Gradio demos for the ONNX organization on Hugging Face
  • \n
\n\n

Here's an example of an ONNX model: try out the EfficientNet-Lite4 demo below.

\n\n\n\n

What is the ONNX Model Zoo?

\n\n

Open Neural Network Exchange (ONNX) is an open standard format for representing machine learning models. ONNX is supported by a community of partners who have implemented it in many frameworks and tools. For example, if you have trained a model in TensorFlow or PyTorch, you can convert it to ONNX easily, and from there run it on a variety of devices using an engine/compiler like ONNX Runtime.

\n\n
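For instance, here is a minimal sketch (separate from the EfficientNet-Lite4 demo later in this guide) of exporting a pretrained PyTorch model to the ONNX format; the file name and input shape are illustrative:

```python
import torch
import torchvision

# Load a pretrained torchvision model and switch it to inference mode
model = torchvision.models.resnet18(pretrained=True).eval()

# ONNX export traces the model with a dummy input of the expected shape
dummy_input = torch.randn(1, 3, 224, 224)
torch.onnx.export(model, dummy_input, "resnet18.onnx",
                  input_names=["input"], output_names=["output"])
```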

The ONNX Model Zoo is a collection of pre-trained, state-of-the-art models in the ONNX format contributed by community members. Accompanying each model are Jupyter notebooks for model training and running inference with the trained model. The notebooks are written in Python and include links to the training dataset as well as references to the original paper that describes the model architecture.

\n\n

What are Hugging Face Spaces & Gradio?

\n\n

Gradio

\n\n

Gradio lets users demo their machine learning models as a web app, all in Python code. Gradio wraps a Python function into a user interface, and the demos can be launched inside Jupyter notebooks and Colab notebooks, embedded in your own website, and hosted on Hugging Face Spaces for free.

\n\n
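As a rough illustration of the idea, a Gradio demo can be as small as the following sketch (the function here is just a placeholder):

```python
import gradio as gr

def greet(name):
    return f"Hello {name}!"

# gr.Interface wraps the Python function in a web UI: a textbox in, a textbox out
gr.Interface(fn=greet, inputs="text", outputs="text").launch()
```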

Get started here

\n\n

Hugging Face Spaces

\n\n

Hugging Face Spaces is a free hosting option for Gradio demos. Spaces comes with 3 SDK options: Gradio, Streamlit, and static HTML demos. Spaces can be public or private, and the workflow is similar to GitHub repos. There are over 2,000 Spaces currently on Hugging Face. Learn more about Spaces here.

\n\n

Hugging Face Models

\n\n

The Hugging Face Model Hub also supports ONNX models, which can be filtered through the ONNX tag.

\n\n
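For example, the same filter can be applied programmatically; this is a minimal sketch assuming a recent version of the huggingface_hub client library, and the exact parameters may differ between versions:

```python
from huggingface_hub import HfApi

api = HfApi()

# List a few of the most-downloaded models tagged with the ONNX library
for model in api.list_models(library="onnx", sort="downloads", limit=5):
    print(model.id)
```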

How did Hugging Face help the ONNX Model Zoo?

\n\n

There are a lot of Jupyter notebooks in the ONNX Model Zoo for users to test models. Previously, users needed to download the models themselves and run those notebooks locally for testing. With Hugging Face, the testing process can be much simpler and more user-friendly. Users can easily try a given ONNX Model Zoo model on Hugging Face Spaces and run a quick demo powered by Gradio with ONNX Runtime, all in the cloud without downloading anything locally. Note that there are various runtimes for ONNX, e.g., ONNX Runtime, MXNet.

\n\n

What is the role of ONNX Runtime?

\n\n

ONNX Runtime is a cross-platform inference and training machine-learning accelerator. It makes live Gradio demos with ONNX Model Zoo model on Hugging Face possible.

\n\n

ONNX Runtime inference can enable faster customer experiences and lower costs, supporting models from deep learning frameworks such as PyTorch and TensorFlow/Keras as well as classical machine learning libraries such as scikit-learn, LightGBM, XGBoost, etc. ONNX Runtime is compatible with different hardware, drivers, and operating systems, and provides optimal performance by leveraging hardware accelerators where applicable alongside graph optimizations and transforms. For more information please see the official website.

\n\n
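As a minimal sketch of what running a model with ONNX Runtime looks like (the model path and input shape below are placeholders):

```python
import numpy as np
import onnxruntime as ort

# Create an inference session from an exported ONNX file
sess = ort.InferenceSession("model.onnx")

# Feed a dummy batch using the model's declared input name
input_name = sess.get_inputs()[0].name
dummy = np.random.rand(1, 3, 224, 224).astype(np.float32)
outputs = sess.run(None, {input_name: dummy})
print(outputs[0].shape)
```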

Setting up a Gradio Demo for EfficientNet-Lite4

\n\n

EfficientNet-Lite 4 is the largest and most accurate variant of the EfficientNet-Lite family. It is an integer-only quantized model that produces the highest accuracy of all of the EfficientNet models. It achieves 80.4% ImageNet top-1 accuracy, while still running in real time (e.g. 30 ms/image) on a Pixel 4 CPU. To learn more, read the model card

\n\n

Here we walk through setting up an example demo for EfficientNet-Lite4 using Gradio.

\n\n

First, we import our dependencies and download and load the efficientnet-lite4 model from the ONNX Model Zoo. Then we load the labels from the labels_map.txt file. We then set up our preprocessing functions, load the model for inference, and set up the inference function. Finally, the inference function is wrapped into a Gradio interface for a user to interact with. See the full code below.

\n\n
import numpy as np\nimport math\nimport matplotlib.pyplot as plt\nimport cv2\nimport json\nimport gradio as gr\nfrom huggingface_hub import hf_hub_download\nfrom onnx import hub\nimport onnxruntime as ort\n\n# loads ONNX model from ONNX Model Zoo\nmodel = hub.load(\"efficientnet-lite4\")\n# loads the labels text file\nlabels = json.load(open(\"labels_map.txt\", \"r\"))\n\n# sets image file dimensions to 224x224 by resizing and cropping image from center\ndef pre_process_edgetpu(img, dims):\n    output_height, output_width, _ = dims\n    img = resize_with_aspectratio(img, output_height, output_width, inter_pol=cv2.INTER_LINEAR)\n    img = center_crop(img, output_height, output_width)\n    img = np.asarray(img, dtype='float32')\n    # converts jpg pixel value from [0 - 255] to float array [-1.0 - 1.0]\n    img -= [127.0, 127.0, 127.0]\n    img /= [128.0, 128.0, 128.0]\n    return img\n\n# resizes the image with a proportional scale\ndef resize_with_aspectratio(img, out_height, out_width, scale=87.5, inter_pol=cv2.INTER_LINEAR):\n    height, width, _ = img.shape\n    new_height = int(100. * out_height / scale)\n    new_width = int(100. * out_width / scale)\n    if height > width:\n        w = new_width\n        h = int(new_height * height / width)\n    else:\n        h = new_height\n        w = int(new_width * width / height)\n    img = cv2.resize(img, (w, h), interpolation=inter_pol)\n    return img\n\n# crops the image around the center based on given height and width\ndef center_crop(img, out_height, out_width):\n    height, width, _ = img.shape\n    left = int((width - out_width) / 2)\n    right = int((width + out_width) / 2)\n    top = int((height - out_height) / 2)\n    bottom = int((height + out_height) / 2)\n    img = img[top:bottom, left:right]\n    return img\n\n\nsess = ort.InferenceSession(model)\n\ndef inference(img):\n  img = cv2.imread(img)\n  img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n  img = pre_process_edgetpu(img, (224, 224, 3))\n\n  img_batch = np.expand_dims(img, axis=0)\n\n  results = sess.run([\"Softmax:0\"], {\"images:0\": img_batch})[0]\n  result = reversed(results[0].argsort()[-5:])\n  resultdic = {}\n  for r in result:\n      resultdic[labels[str(r)]] = float(results[0][r])\n  return resultdic\n\ntitle = \"EfficientNet-Lite4\"\ndescription = \"EfficientNet-Lite 4 is the largest variant and most accurate of the set of EfficientNet-Lite model. It is an integer-only quantized model that produces the highest accuracy of all of the EfficientNet models. It achieves 80.4% ImageNet top-1 accuracy, while still running in real-time (e.g. 30ms/image) on a Pixel 4 CPU.\"\nexamples = [['catonnx.jpg']]\ngr.Interface(inference, gr.Image(type=\"filepath\"), \"label\", title=title, description=description, examples=examples).launch()\n
\n\n

How to contribute Gradio demos on HF spaces using ONNX models

\n\n
    \n
  • Add model to the onnx model zoo
  • \n
  • Create an account on Hugging Face here.
  • \n
  • To see the list of models left to add to the ONNX organization, refer to the table with the Models list
  • \n
  • Add a Gradio Demo under your username; see this blog post for setting up a Gradio Demo on Hugging Face.
  • \n
  • Request to join ONNX Organization here.
  • \n
  • Once approved, transfer the model from your username to the ONNX organization
  • \n
  • Add a badge for the model in the model table; see examples in the Models list
  • \n
\n", "tags": ["ONNX", "SPACES"], "spaces": ["https://huggingface.co/spaces/onnx/EfficientNet-Lite4"], "url": "/guides/Gradio-and-ONNX-on-Hugging-Face/", "contributor": "Gradio and the ONNX team"}, {"name": "Gradio-and-Wandb-Integration", "category": "integrating-other-frameworks", "pretty_category": "Integrating Other Frameworks", "guide_index": null, "absolute_index": 19, "pretty_name": "Gradio And Wandb Integration", "content": "# Gradio and W&B Integration\n\n\n\n\n\n## Introduction\n\nIn this Guide, we'll walk you through:\n\n* Introduction of Gradio, and Hugging Face Spaces, and Wandb\n* How to setup a Gradio demo using the Wandb integration for JoJoGAN\n* How to contribute your own Gradio demos after tracking your experiments on wandb to the Wandb organization on Hugging Face\n\nHere's an example of an model trained and experiments tracked on wandb, try out the JoJoGAN demo below.\n\n\n\n## What is Wandb?\n\nWeights and Biases (W&B) allows data scientists and machine learning scientists to track their machine learning experiments at every stage, from training to production. Any metric can be aggregated over samples and shown in panels in a customizable and searchable dashboard, like below:\n\n\"Screen\n\n\n## What are Hugging Face Spaces & Gradio?\n\n### Gradio\n\nGradio lets users demo their machine learning models as a web app, all in a few lines of Python. Gradio wraps any Python function (such as a machine learning model's inference function) into a user interface and the demos can be launched inside jupyter notebooks, colab notebooks, as well as embedded in your own website and hosted on Hugging Face Spaces for free.\n\nGet started [here](https://gradio.app/getting_started)\n\n### Hugging Face Spaces\n\nHugging Face Spaces is a free hosting option for Gradio demos. Spaces comes with 3 SDK options: Gradio, Streamlit and Static HTML demos. Spaces can be public or private and the workflow is similar to github repos. There are over 2000+ spaces currently on Hugging Face. Learn more about spaces [here](https://huggingface.co/spaces/launch).\n\n\n## Setting up a Gradio Demo for JoJoGAN\n\nNow, let's walk you through how to do this on your own. We'll make the assumption that you're new to W&B and Gradio for the purposes of this tutorial. \n\nLet's get started!\n\n1. Create a W&B account\n\n Follow [these quick instructions](https://app.wandb.ai/login) to create your free account if you don\u2019t have one already. It shouldn't take more than a couple minutes. Once you're done (or if you've already got an account), next, we'll run a quick colab. \n\n2. Open Colab Install Gradio and W&B\n\n We'll be following along with the colab provided in the JoJoGAN repo with some minor modifications to use Wandb and Gradio more effectively. \n\n [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/mchong6/JoJoGAN/blob/main/stylize.ipynb)\n\n Install Gradio and Wandb at the top:\n\n```sh\n\npip install gradio wandb\n```\n\n3. Finetune StyleGAN and W&B experiment tracking\n\n This next step will open a W&B dashboard to track your experiments and a gradio panel showing pretrained models to choose from a drop down menu from a Gradio Demo hosted on Huggingface Spaces. 
Here's the code you need for that:\n\n ```python\n \n alpha = 1.0 \n alpha = 1-alpha\n\n preserve_color = True \n num_iter = 100 \n log_interval = 50 \n\n\n samples = []\n column_names = [\"Reference (y)\", \"Style Code(w)\", \"Real Face Image(x)\"]\n\n wandb.init(project=\"JoJoGAN\")\n config = wandb.config\n config.num_iter = num_iter\n config.preserve_color = preserve_color\n wandb.log(\n {\"Style reference\": [wandb.Image(transforms.ToPILImage()(target_im))]},\n step=0)\n\n # load discriminator for perceptual loss\n discriminator = Discriminator(1024, 2).eval().to(device)\n ckpt = torch.load('models/stylegan2-ffhq-config-f.pt', map_location=lambda storage, loc: storage)\n discriminator.load_state_dict(ckpt[\"d\"], strict=False)\n\n # reset generator\n del generator\n generator = deepcopy(original_generator)\n\n g_optim = optim.Adam(generator.parameters(), lr=2e-3, betas=(0, 0.99))\n\n # Which layers to swap for generating a family of plausible real images -> fake image\n if preserve_color:\n id_swap = [9,11,15,16,17]\n else:\n id_swap = list(range(7, generator.n_latent))\n\n for idx in tqdm(range(num_iter)):\n mean_w = generator.get_latent(torch.randn([latents.size(0), latent_dim]).to(device)).unsqueeze(1).repeat(1, generator.n_latent, 1)\n in_latent = latents.clone()\n in_latent[:, id_swap] = alpha*latents[:, id_swap] + (1-alpha)*mean_w[:, id_swap]\n\n img = generator(in_latent, input_is_latent=True)\n\n with torch.no_grad():\n real_feat = discriminator(targets)\n fake_feat = discriminator(img)\n\n loss = sum([F.l1_loss(a, b) for a, b in zip(fake_feat, real_feat)])/len(fake_feat)\n \n\n wandb.log({\"loss\": loss}, step=idx)\n if idx % log_interval == 0:\n generator.eval()\n my_sample = generator(my_w, input_is_latent=True)\n generator.train()\n my_sample = transforms.ToPILImage()(utils.make_grid(my_sample, normalize=True, range=(-1, 1)))\n wandb.log(\n {\"Current stylization\": [wandb.Image(my_sample)]},\n step=idx)\n table_data = [\n wandb.Image(transforms.ToPILImage()(target_im)),\n wandb.Image(img),\n wandb.Image(my_sample),\n ]\n samples.append(table_data)\n\n g_optim.zero_grad()\n loss.backward()\n g_optim.step()\n\n out_table = wandb.Table(data=samples, columns=column_names)\n wandb.log({\"Current Samples\": out_table})\n ```\n\nalpha = 1.0 \nalpha = 1-alpha\n\npreserve_color = True \nnum_iter = 100 \nlog_interval = 50 \n\n\nsamples = []\ncolumn_names = [\"Referece (y)\", \"Style Code(w)\", \"Real Face Image(x)\"]\n\nwandb.init(project=\"JoJoGAN\")\nconfig = wandb.config\nconfig.num_iter = num_iter\nconfig.preserve_color = preserve_color\nwandb.log(\n{\"Style reference\": [wandb.Image(transforms.ToPILImage()(target_im))]},\nstep=0)\n\n# load discriminator for perceptual loss\ndiscriminator = Discriminator(1024, 2).eval().to(device)\nckpt = torch.load('models/stylegan2-ffhq-config-f.pt', map_location=lambda storage, loc: storage)\ndiscriminator.load_state_dict(ckpt[\"d\"], strict=False)\n\n# reset generator\ndel generator\ngenerator = deepcopy(original_generator)\n\ng_optim = optim.Adam(generator.parameters(), lr=2e-3, betas=(0, 0.99))\n\n# Which layers to swap for generating a family of plausible real images -> fake image\nif preserve_color:\n id_swap = [9,11,15,16,17]\nelse:\n id_swap = list(range(7, generator.n_latent))\n\nfor idx in tqdm(range(num_iter)):\n mean_w = generator.get_latent(torch.randn([latents.size(0), latent_dim]).to(device)).unsqueeze(1).repeat(1, generator.n_latent, 1)\n in_latent = latents.clone()\n in_latent[:, id_swap] = alpha*latents[:, id_swap] + 
(1-alpha)*mean_w[:, id_swap]\n\n img = generator(in_latent, input_is_latent=True)\n\n with torch.no_grad():\n real_feat = discriminator(targets)\n fake_feat = discriminator(img)\n\n loss = sum([F.l1_loss(a, b) for a, b in zip(fake_feat, real_feat)])/len(fake_feat)\n \n\n wandb.log({\"loss\": loss}, step=idx)\n if idx % log_interval == 0:\n generator.eval()\n my_sample = generator(my_w, input_is_latent=True)\n generator.train()\n my_sample = transforms.ToPILImage()(utils.make_grid(my_sample, normalize=True, range=(-1, 1)))\n wandb.log(\n {\"Current stylization\": [wandb.Image(my_sample)]},\n step=idx)\n table_data = [\n wandb.Image(transforms.ToPILImage()(target_im)),\n wandb.Image(img),\n wandb.Image(my_sample),\n ]\n samples.append(table_data)\n\n g_optim.zero_grad()\n loss.backward()\n g_optim.step()\n\nout_table = wandb.Table(data=samples, columns=column_names)\nwandb.log({\"Current Samples\": out_table})\n```\n\n4. Save, Download, and Load Model\n\n Here's how to save and download your model.\n\n```python\n\nfrom PIL import Image\nimport torch\ntorch.backends.cudnn.benchmark = True\nfrom torchvision import transforms, utils\nfrom util import *\nimport math\nimport random\nimport numpy as np\nfrom torch import nn, autograd, optim\nfrom torch.nn import functional as F\nfrom tqdm import tqdm\nimport lpips\nfrom model import *\nfrom e4e_projection import projection as e4e_projection\n\nfrom copy import deepcopy\nimport imageio\n\nimport os\nimport sys\nimport torchvision.transforms as transforms\nfrom argparse import Namespace\nfrom e4e.models.psp import pSp\nfrom util import *\nfrom huggingface_hub import hf_hub_download\nfrom google.colab import files\n\ntorch.save({\"g\": generator.state_dict()}, \"your-model-name.pt\")\n\nfiles.download('your-model-name.pt') \n\nlatent_dim = 512\ndevice=\"cuda\"\nmodel_path_s = hf_hub_download(repo_id=\"akhaliq/jojogan-stylegan2-ffhq-config-f\", filename=\"stylegan2-ffhq-config-f.pt\")\noriginal_generator = Generator(1024, latent_dim, 8, 2).to(device)\nckpt = torch.load(model_path_s, map_location=lambda storage, loc: storage)\noriginal_generator.load_state_dict(ckpt[\"g_ema\"], strict=False)\nmean_latent = original_generator.mean_latent(10000)\n\ngenerator = deepcopy(original_generator)\n\nckpt = torch.load(\"/content/JoJoGAN/your-model-name.pt\", map_location=lambda storage, loc: storage)\ngenerator.load_state_dict(ckpt[\"g\"], strict=False)\ngenerator.eval()\n\nplt.rcParams['figure.dpi'] = 150\n\n\n\ntransform = transforms.Compose(\n [\n transforms.Resize((1024, 1024)),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ]\n)\n\n\ndef inference(img): \n img.save('out.jpg') \n aligned_face = align_face('out.jpg')\n\n my_w = e4e_projection(aligned_face, \"out.pt\", device).unsqueeze(0) \n with torch.no_grad():\n my_sample = generator(my_w, input_is_latent=True)\n \n \n npimage = my_sample[0].cpu().permute(1, 2, 0).detach().numpy()\n imageio.imwrite('filename.jpeg', npimage)\n return 'filename.jpeg'\n```\n\n5. Build a Gradio Demo\n\n```python\n\nimport gradio as gr\n\ntitle = \"JoJoGAN\"\ndescription = \"Gradio Demo for JoJoGAN: One Shot Face Stylization. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below.\"\n\ndemo = gr.Interface(\n inference, \n gr.Image(type=\"pil\"), \n gr.Image(type=\"file\"),\n title=title,\n description=description\n)\n\ndemo.launch(share=True)\n```\n\n6. 
Integrate Gradio into your W&B Dashboard\n\n The last step\u2014integrating your Gradio demo with your W&B dashboard\u2014is just one extra line:\n\n```python\n\ndemo.integrate(wandb=wandb)\n```\n\n Once you call integrate, a demo will be created and you can integrate it into your dashboard or report\n\n Outside of W&B with Web components, using the gradio-app tags allows anyone can embed Gradio demos on HF spaces directly into their blogs, websites, documentation, etc.:\n\n```html\n\n \n```\n\n\n7. (Optional) Embed W&B plots in your Gradio App\n\n It's also possible to embed W&B plots within Gradio apps. To do so, you can create a W&B Report of your plots and \n embed them within your Gradio app within a `gr.HTML` block. \n\n The Report will need to be public and you will need to wrap the URL within an iFrame like this: \n```python\n\nimport gradio as gr\n\ndef wandb_report(url):\n iframe = f'\n\n

What is Wandb?

\n\n

Weights and Biases (W&B) allows data scientists and machine learning scientists to track their machine learning experiments at every stage, from training to production. Any metric can be aggregated over samples and shown in panels in a customizable, searchable dashboard, like below:

\n\n

\"Screen

\n\n
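As a rough sketch of what that experiment tracking looks like in code (the project name and metric below are placeholders):

```python
import wandb

# Start a run; metrics logged below appear in the W&B dashboard for this project
run = wandb.init(project="my-demo-project")

for step in range(10):
    loss = 1.0 / (step + 1)  # placeholder metric
    wandb.log({"loss": loss}, step=step)

run.finish()
```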

What are Hugging Face Spaces & Gradio?

\n\n

Gradio

\n\n

Gradio lets users demo their machine learning models as a web app, all in a few lines of Python. Gradio wraps any Python function (such as a machine learning model's inference function) into a user interface, and the demos can be launched inside Jupyter notebooks and Colab notebooks, embedded in your own website, and hosted on Hugging Face Spaces for free.

\n\n

Get started here

\n\n

Hugging Face Spaces

\n\n

Hugging Face Spaces is a free hosting option for Gradio demos. Spaces comes with 3 SDK options: Gradio, Streamlit, and static HTML demos. Spaces can be public or private, and the workflow is similar to GitHub repos. There are over 2,000 Spaces currently on Hugging Face. Learn more about Spaces here.

\n\n

Setting up a Gradio Demo for JoJoGAN

\n\n

Now, let's walk you through how to do this on your own. We'll make the assumption that you're new to W&B and Gradio for the purposes of this tutorial.

\n\n

Let's get started!

\n\n
    \n
  1. Create a W&B account

    \n\n

    Follow these quick instructions to create your free account if you don\u2019t have one already. It shouldn't take more than a couple minutes. Once you're done (or if you've already got an account), next, we'll run a quick colab.

  2. \n
  3. Open Colab, Install Gradio and W&B

    \n\n

    We'll be following along with the colab provided in the JoJoGAN repo with some minor modifications to use Wandb and Gradio more effectively.

    \n\n

    \"Open

    \n\n

    Install Gradio and Wandb at the top:

  4. \n
\n\n
\npip install gradio wandb\n
\n\n
    \n
  1. Finetune StyleGAN and W&B experiment tracking

    \n\n

    This next step will open a W&B dashboard to track your experiments, and a Gradio panel showing pretrained models to choose from a dropdown menu in a Gradio Demo hosted on Hugging Face Spaces. Here's the code you need for that:

    \n\n
        \nalpha =  1.0 \nalpha = 1-alpha\n\npreserve_color = True \nnum_iter = 100 \nlog_interval = 50 \n\n\nsamples = []\ncolumn_names = [\"Reference (y)\", \"Style Code(w)\", \"Real Face Image(x)\"]\n\nwandb.init(project=\"JoJoGAN\")\nconfig = wandb.config\nconfig.num_iter = num_iter\nconfig.preserve_color = preserve_color\nwandb.log(\n{\"Style reference\": [wandb.Image(transforms.ToPILImage()(target_im))]},\nstep=0)\n\n

    load discriminator for perceptual loss

    \n\ndiscriminator = Discriminator(1024, 2).eval().to(device)\nckpt = torch.load('models/stylegan2-ffhq-config-f.pt', map_location=lambda storage, loc: storage)\ndiscriminator.load_state_dict(ckpt[\"d\"], strict=False)\n\n

    reset generator

    \n\ndel generator\ngenerator = deepcopy(original_generator)\n\ng_optim = optim.Adam(generator.parameters(), lr=2e-3, betas=(0, 0.99))\n\n

    Which layers to swap for generating a family of plausible real images -> fake image

    \n\nif preserve_color:\n id_swap = [9,11,15,16,17]\nelse:\n id_swap = list(range(7, generator.n_latent))\n\nfor idx in tqdm(range(num_iter)):\n mean_w = generator.get_latent(torch.randn([latents.size(0), latent_dim]).to(device)).unsqueeze(1).repeat(1, generator.n_latent, 1)\n in_latent = latents.clone()\n in_latent[:, id_swap] = alpha*latents[:, id_swap] + (1-alpha)*mean_w[:, id_swap]\n\n img = generator(in_latent, input_is_latent=True)\n\n with torch.no_grad():\n real_feat = discriminator(targets)\n fake_feat = discriminator(img)\n\n loss = sum([F.l1_loss(a, b) for a, b in zip(fake_feat, real_feat)])/len(fake_feat)\n\n\n wandb.log({\"loss\": loss}, step=idx)\n if idx % log_interval == 0:\n generator.eval()\n my_sample = generator(my_w, input_is_latent=True)\n generator.train()\n my_sample = transforms.ToPILImage()(utils.make_grid(my_sample, normalize=True, range=(-1, 1)))\n wandb.log(\n {\"Current stylization\": [wandb.Image(my_sample)]},\n step=idx)\n table_data = [\n wandb.Image(transforms.ToPILImage()(target_im)),\n wandb.Image(img),\n wandb.Image(my_sample),\n ]\n samples.append(table_data)\n\n g_optim.zero_grad()\n loss.backward()\n g_optim.step()\n\nout_table = wandb.Table(data=samples, columns=column_names)\nwandb.log({\"Current Samples\": out_table})\n
  2. \n
\n\n

\n\n
    \n
  1. Save, Download, and Load Model

    \n\n

    Here's how to save and download your model.

  2. \n
\n\n
\nfrom PIL import Image\nimport torch\ntorch.backends.cudnn.benchmark = True\nfrom torchvision import transforms, utils\nfrom util import *\nimport math\nimport random\nimport numpy as np\nfrom torch import nn, autograd, optim\nfrom torch.nn import functional as F\nfrom tqdm import tqdm\nimport lpips\nfrom model import *\nfrom e4e_projection import projection as e4e_projection\n\nfrom copy import deepcopy\nimport imageio\n\nimport os\nimport sys\nimport torchvision.transforms as transforms\nfrom argparse import Namespace\nfrom e4e.models.psp import pSp\nfrom util import *\nfrom huggingface_hub import hf_hub_download\nfrom google.colab import files\n\ntorch.save({\"g\": generator.state_dict()}, \"your-model-name.pt\")\n\nfiles.download('your-model-name.pt') \n\nlatent_dim = 512\ndevice=\"cuda\"\nmodel_path_s = hf_hub_download(repo_id=\"akhaliq/jojogan-stylegan2-ffhq-config-f\", filename=\"stylegan2-ffhq-config-f.pt\")\noriginal_generator = Generator(1024, latent_dim, 8, 2).to(device)\nckpt = torch.load(model_path_s, map_location=lambda storage, loc: storage)\noriginal_generator.load_state_dict(ckpt[\"g_ema\"], strict=False)\nmean_latent = original_generator.mean_latent(10000)\n\ngenerator = deepcopy(original_generator)\n\nckpt = torch.load(\"/content/JoJoGAN/your-model-name.pt\", map_location=lambda storage, loc: storage)\ngenerator.load_state_dict(ckpt[\"g\"], strict=False)\ngenerator.eval()\n\nplt.rcParams['figure.dpi'] = 150\n\n\n\ntransform = transforms.Compose(\n    [\n        transforms.Resize((1024, 1024)),\n        transforms.ToTensor(),\n        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n    ]\n)\n\n\ndef inference(img):  \n    img.save('out.jpg')  \n    aligned_face = align_face('out.jpg')\n\n    my_w = e4e_projection(aligned_face, \"out.pt\", device).unsqueeze(0)  \n    with torch.no_grad():\n        my_sample = generator(my_w, input_is_latent=True)\n\n\n    npimage = my_sample[0].cpu().permute(1, 2, 0).detach().numpy()\n    imageio.imwrite('filename.jpeg', npimage)\n    return 'filename.jpeg'\n
\n\n
    \n
  1. Build a Gradio Demo
  2. \n
\n\n
\nimport gradio as gr\n\ntitle = \"JoJoGAN\"\ndescription = \"Gradio Demo for JoJoGAN: One Shot Face Stylization. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below.\"\n\ndemo = gr.Interface(\n    inference, \n    gr.Image(type=\"pil\"), \n    gr.Image(type=\"file\"),\n    title=title,\n    description=description\n)\n\ndemo.launch(share=True)\n
\n\n
    \n
  1. Integrate Gradio into your W&B Dashboard

    \n\n

    The last step\u2014integrating your Gradio demo with your W&B dashboard\u2014is just one extra line:

  2. \n
\n\n
\ndemo.integrate(wandb=wandb)\n
\n\n
Once you call integrate, a demo will be created and you can add it to your dashboard or report.\n\nOutside of W&B, with web components, using the gradio-app tags allows anyone to embed Gradio demos on HF Spaces directly into their blogs, websites, documentation, etc.:\n
\n\n
\n \n
\n\n
    \n
  1. (Optional) Embed W&B plots in your Gradio App

    \n\n

    It's also possible to embed W&B plots within Gradio apps. To do so, you can create a W&B Report of your plots and embed them in your Gradio app inside a gr.HTML block.

    \n\n

    The Report will need to be public and you will need to wrap the URL within an iFrame like this:

  2. \n
\n\n
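A minimal sketch of such a wrapper, assuming a public W&B Report URL (the URL and styling below are placeholders):

```python
import gradio as gr

def wandb_report(url):
    # Wrap a public W&B Report URL in an iFrame so it can be rendered by gr.HTML
    iframe = f'<iframe src={url} style="border:none;height:1024px;width:100%">'
    return gr.HTML(iframe)

with gr.Blocks() as demo:
    report_url = "https://wandb.ai/your-entity/your-project/reports/your-report"  # placeholder
    wandb_report(report_url)

demo.launch()
```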
\nimport gradio as gr\n\ndef wandb_report(url):\n    iframe = f'\n\n\nLet's get started!\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). We will be using a pretrained image classification model, so you should also have `torch` installed.\n\n## Step 1 \u2014 Setting up the Image Classification Model\n\nFirst, we will need an image classification model. For this tutorial, we will use a pretrained Resnet-18 model, as it is easily downloadable from [PyTorch Hub](https://pytorch.org/hub/pytorch_vision_resnet/). You can use a different pretrained model or train your own. \n\n```python\nimport torch\n\nmodel = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()\n```\n\nBecause we will be using the model for inference, we have called the `.eval()` method.\n\n## Step 2 \u2014 Defining a `predict` function\n\nNext, we will need to define a function that takes in the *user input*, which in this case is an image, and returns the prediction. The prediction should be returned as a dictionary whose keys are class name and values are confidence probabilities. We will load the class names from this [text file](https://git.io/JJkYN).\n\nIn the case of our pretrained model, it will look like this:\n\n```python\nimport requests\nfrom PIL import Image\nfrom torchvision import transforms\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\ndef predict(inp):\n  inp = transforms.ToTensor()(inp).unsqueeze(0)\n  with torch.no_grad():\n    prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)\n    confidences = {labels[i]: float(prediction[i]) for i in range(1000)}    \n  return confidences\n```\n\nLet's break this down. The function takes one parameter:\n\n* `inp`: the input image as a `PIL` image\n\nThen, the function converts the image to a PIL Image and then eventually a PyTorch `tensor`, passes it through the model, and returns:\n\n* `confidences`: the predictions, as a dictionary whose keys are class labels and whose values are confidence probabilities\n\n## Step 3 \u2014 Creating a Gradio Interface\n\nNow that we have our predictive function set up, we can create a Gradio Interface around it. \n\nIn this case, the input component is a drag-and-drop image component. To create this input, we use `Image(type=\"pil\")` which creates the component and handles the preprocessing to convert that to a `PIL` image. \n\nThe output component will be a `Label`, which displays the top labels in a nice form. Since we don't want to show all 1,000 class labels, we will customize it to show only the top 3 images by constructing it as `Label(num_top_classes=3)`.\n\nFinally, we'll add one more parameter, the `examples`, which allows us to prepopulate our interfaces with a few predefined examples. The code for Gradio looks like this:\n\n```python\nimport gradio as gr\n\ngr.Interface(fn=predict, \n             inputs=gr.Image(type=\"pil\"),\n             outputs=gr.Label(num_top_classes=3),\n             examples=[\"lion.jpg\", \"cheetah.jpg\"]).launch()\n```\n\nThis produces the following interface, which you can try right here in your browser (try uploading your own examples!):\n\n\n\n----------\n\nAnd you're done! That's all the code you need to build a web demo for an image classifier. If you'd like to share with others, try setting `share=True` when you `launch()` the Interface!\n\n", "html": "

Image Classification in PyTorch

\n\n

Introduction

\n\n

Image classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from autonomous vehicles to medical imaging.

\n\n

Such models are perfect to use with Gradio's image input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in Python, and it will look like this (try one of the examples!):

\n\n\n\n

Let's get started!

\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed. We will be using a pretrained image classification model, so you should also have torch installed.

\n\n

Step 1 \u2014 Setting up the Image Classification Model

\n\n

First, we will need an image classification model. For this tutorial, we will use a pretrained Resnet-18 model, as it is easily downloadable from PyTorch Hub. You can use a different pretrained model or train your own.

\n\n
import torch\n\nmodel = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()\n
\n\n

Because we will be using the model for inference, we have called the .eval() method.

\n\n

Step 2 \u2014 Defining a predict function

\n\n

Next, we will need to define a function that takes in the user input, which in this case is an image, and returns the prediction. The prediction should be returned as a dictionary whose keys are class name and values are confidence probabilities. We will load the class names from this text file.

\n\n

In the case of our pretrained model, it will look like this:

\n\n
import requests\nfrom PIL import Image\nfrom torchvision import transforms\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\ndef predict(inp):\n  inp = transforms.ToTensor()(inp).unsqueeze(0)\n  with torch.no_grad():\n    prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)\n    confidences = {labels[i]: float(prediction[i]) for i in range(1000)}    \n  return confidences\n
\n\n

Let's break this down. The function takes one parameter:

\n\n
    \n
  • inp: the input image as a PIL image
  • \n
\n\n

Then, the function converts the PIL image into a PyTorch tensor, passes it through the model, and returns:

\n\n
    \n
  • confidences: the predictions, as a dictionary whose keys are class labels and whose values are confidence probabilities
  • \n
\n\n

Step 3 \u2014 Creating a Gradio Interface

\n\n

Now that we have our predictive function set up, we can create a Gradio Interface around it.

\n\n

In this case, the input component is a drag-and-drop image component. To create this input, we use Image(type=\"pil\") which creates the component and handles the preprocessing to convert that to a PIL image.

\n\n

The output component will be a Label, which displays the top labels in a nice form. Since we don't want to show all 1,000 class labels, we will customize it to show only the top 3 labels by constructing it as Label(num_top_classes=3).

\n\n

Finally, we'll add one more parameter, the examples, which allows us to prepopulate our interfaces with a few predefined examples. The code for Gradio looks like this:

\n\n
import gradio as gr\n\ngr.Interface(fn=predict, \n             inputs=gr.Image(type=\"pil\"),\n             outputs=gr.Label(num_top_classes=3),\n             examples=[\"lion.jpg\", \"cheetah.jpg\"]).launch()\n
\n\n

This produces the following interface, which you can try right here in your browser (try uploading your own examples!):

\n\n\n\n
\n\n

And you're done! That's all the code you need to build a web demo for an image classifier. If you'd like to share with others, try setting share=True when you launch() the Interface!
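For example, a minimal sketch (the variable name demo is illustrative and not part of the guide's code):

```python
import gradio as gr

# Same Interface as above, assigned to a variable so we can call launch() on it.
demo = gr.Interface(fn=predict,
                    inputs=gr.Image(type="pil"),
                    outputs=gr.Label(num_top_classes=3),
                    examples=["lion.jpg", "cheetah.jpg"])

# share=True asks Gradio to create a temporary public link you can send to others.
demo.launch(share=True)
```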

\n", "tags": ["VISION", "RESNET", "PYTORCH"], "spaces": ["https://huggingface.co/spaces/abidlabs/pytorch-image-classifier", "https://huggingface.co/spaces/pytorch/ResNet", "https://huggingface.co/spaces/pytorch/ResNext", "https://huggingface.co/spaces/pytorch/SqueezeNet"], "url": "/guides/image-classification-in-pytorch/", "contributor": null}, {"name": "image-classification-in-tensorflow", "category": "integrating-other-frameworks", "pretty_category": "Integrating Other Frameworks", "guide_index": null, "absolute_index": 21, "pretty_name": "Image Classification In Tensorflow", "content": "# Image Classification in TensorFlow and Keras\n\n\n\n\n## Introduction\n\nImage classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from traffic control systems to satellite imaging. \n\nSuch models are perfect to use with Gradio's *image* input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in Python, and it will look like this (try one of the examples!):\n\n\n\n\nLet's get started!\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). We will be using a pretrained Keras image classification model, so you should also have `tensorflow` installed.\n\n## Step 1 \u2014 Setting up the Image Classification Model\n\nFirst, we will need an image classification model. For this tutorial, we will use a pretrained Mobile Net model, as it is easily downloadable from [Keras](https://keras.io/api/applications/mobilenet/). You can use a different pretrained model or train your own. \n\n```python\nimport tensorflow as tf\n\ninception_net = tf.keras.applications.MobileNetV2()\n```\n\nThis line automatically downloads the MobileNet model and weights using the Keras library. \n\n## Step 2 \u2014 Defining a `predict` function\n\nNext, we will need to define a function that takes in the *user input*, which in this case is an image, and returns the prediction. The prediction should be returned as a dictionary whose keys are class name and values are confidence probabilities. We will load the class names from this [text file](https://git.io/JJkYN).\n\nIn the case of our pretrained model, it will look like this:\n\n```python\nimport requests\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\ndef classify_image(inp):\n inp = inp.reshape((-1, 224, 224, 3))\n inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)\n prediction = inception_net.predict(inp).flatten()\n confidences = {labels[i]: float(prediction[i]) for i in range(1000)}\n return confidences\n```\n\nLet's break this down. The function takes one parameter:\n\n* `inp`: the input image as a `numpy` array\n\nThen, the function adds a batch dimension, passes it through the model, and returns:\n\n* `confidences`: the predictions, as a dictionary whose keys are class labels and whose values are confidence probabilities\n\n## Step 3 \u2014 Creating a Gradio Interface\n\nNow that we have our predictive function set up, we can create a Gradio Interface around it. \n\nIn this case, the input component is a drag-and-drop image component. 
To create this input, we can use the `\"gradio.inputs.Image\"` class, which creates the component and handles the preprocessing to convert that to a numpy array. We will instantiate the class with a parameter that automatically preprocesses the input image to be 224 pixels by 224 pixels, which is the size that MobileNet expects.\n\nThe output component will be a `\"label\"`, which displays the top labels in a nice form. Since we don't want to show all 1,000 class labels, we will customize it to show only the top 3 images.\n\nFinally, we'll add one more parameter, the `examples`, which allows us to prepopulate our interfaces with a few predefined examples. The code for Gradio looks like this:\n\n```python\nimport gradio as gr\n\ngr.Interface(fn=classify_image, \n inputs=gr.Image(shape=(224, 224)),\n outputs=gr.Label(num_top_classes=3),\n examples=[\"banana.jpg\", \"car.jpg\"]).launch()\n```\n\nThis produces the following interface, which you can try right here in your browser (try uploading your own examples!):\n\n\n\n----------\n\nAnd you're done! That's all the code you need to build a web demo for an image classifier. If you'd like to share with others, try setting `share=True` when you `launch()` the Interface!\n\n", "html": "

Image Classification in TensorFlow and Keras

\n\n

Introduction

\n\n

Image classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from traffic control systems to satellite imaging.

\n\n

Such models are perfect to use with Gradio's image input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in Python, and it will look like this (try one of the examples!):

\n\n\n\n

Let's get started!

\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed. We will be using a pretrained Keras image classification model, so you should also have tensorflow installed.

\n\n

Step 1 \u2014 Setting up the Image Classification Model

\n\n

First, we will need an image classification model. For this tutorial, we will use a pretrained MobileNet model, as it is easily downloadable from Keras. You can use a different pretrained model or train your own.

\n\n
import tensorflow as tf\n\ninception_net = tf.keras.applications.MobileNetV2()\n
\n\n

This line automatically downloads the MobileNet model and weights using the Keras library.

\n\n

Step 2 \u2014 Defining a predict function

\n\n

Next, we will need to define a function that takes in the user input, which in this case is an image, and returns the prediction. The prediction should be returned as a dictionary whose keys are class names and whose values are confidence probabilities. We will load the class names from this text file.

\n\n

In the case of our pretrained model, it will look like this:

\n\n
import requests\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\ndef classify_image(inp):\n  inp = inp.reshape((-1, 224, 224, 3))\n  inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)\n  prediction = inception_net.predict(inp).flatten()\n  confidences = {labels[i]: float(prediction[i]) for i in range(1000)}\n  return confidences\n
\n\n

Let's break this down. The function takes one parameter:

\n\n
    \n
  • inp: the input image as a numpy array
  • \n
\n\n

Then, the function adds a batch dimension, passes it through the model, and returns:

\n\n
    \n
  • confidences: the predictions, as a dictionary whose keys are class labels and whose values are confidence probabilities
  • \n
\n\n

Step 3 \u2014 Creating a Gradio Interface

\n\n

Now that we have our predictive function set up, we can create a Gradio Interface around it.

\n\n

In this case, the input component is a drag-and-drop image component. To create this input, we can use the gr.Image class, which creates the component and handles the preprocessing to convert the input to a numpy array. We will instantiate the class with a parameter that automatically preprocesses the input image to be 224 pixels by 224 pixels, which is the size that MobileNet expects.

\n\n

The output component will be a \"label\", which displays the top labels in a nice form. Since we don't want to show all 1,000 class labels, we will customize it to show only the top 3 labels.

\n\n

Finally, we'll add one more parameter, the examples, which allows us to prepopulate our interfaces with a few predefined examples. The code for Gradio looks like this:

\n\n
import gradio as gr\n\ngr.Interface(fn=classify_image, \n             inputs=gr.Image(shape=(224, 224)),\n             outputs=gr.Label(num_top_classes=3),\n             examples=[\"banana.jpg\", \"car.jpg\"]).launch()\n
\n\n

This produces the following interface, which you can try right here in your browser (try uploading your own examples!):

\n\n\n\n
\n\n

And you're done! That's all the code you need to build a web demo for an image classifier. If you'd like to share with others, try setting share=True when you launch() the Interface!

\n", "tags": ["VISION", "MOBILENET", "TENSORFLOW"], "spaces": ["https://huggingface.co/spaces/abidlabs/keras-image-classifier"], "url": "/guides/image-classification-in-tensorflow/", "contributor": null}, {"name": "image-classification-with-vision-transformers", "category": "integrating-other-frameworks", "pretty_category": "Integrating Other Frameworks", "guide_index": null, "absolute_index": 22, "pretty_name": "Image Classification With Vision Transformers", "content": "# Image Classification with Vision Transformers\n\n\n\n\n## Introduction\n\nImage classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from facial recognition to manufacturing quality control. \n\nState-of-the-art image classifiers are based on the *transformers* architectures, originally popularized for NLP tasks. Such architectures are typically called vision transformers (ViT). Such models are perfect to use with Gradio's *image* input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in a **single line of Python**, and it will look like this (try one of the examples!):\n\n\n\n\nLet's get started!\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started).\n\n## Step 1 \u2014 Choosing a Vision Image Classification Model\n\nFirst, we will need an image classification model. For this tutorial, we will use a model from the [Hugging Face Model Hub](https://huggingface.co/models?pipeline_tag=image-classification). The Hub contains thousands of models covering dozens of different machine learning tasks. \n\nExpand the Tasks category on the left sidebar and select \"Image Classification\" as our task of interest. You will then see all of the models on the Hub that are designed to classify images.\n\nAt the time of writing, the most popular one is `google/vit-base-patch16-224`, which has been trained on ImageNet images at a resolution of 224x224 pixels. We will use this model for our demo. \n\n## Step 2 \u2014 Loading the Vision Transformer Model with Gradio\n\nWhen using a model from the Hugging Face Hub, we do not need to define the input or output components for the demo. Similarly, we do not need to be concerned with the details of preprocessing or postprocessing. \nAll of these are automatically inferred from the model tags.\n\nBesides the import statement, it only takes a single line of Python to load and launch the demo. \n\nWe use the `gr.Interface.load()` method and pass in the path to the model including the `huggingface/` to designate that it is from the Hugging Face Hub.\n\n```python\nimport gradio as gr\n\ngr.Interface.load(\n \"huggingface/google/vit-base-patch16-224\",\n examples=[\"alligator.jpg\", \"laptop.jpg\"]).launch()\n```\n\nNotice that we have added one more parameter, the `examples`, which allows us to prepopulate our interfaces with a few predefined examples. \n\nThis produces the following interface, which you can try right here in your browser. When you input an image, it is automatically preprocessed and sent to the Hugging Face Hub API, where it is passed through the model and returned as a human-interpretable prediction. Try uploading your own image!\n\n\n\n----------\n\nAnd you're done! In one line of code, you have built a web demo for an image classifier. 
If you'd like to share with others, try setting `share=True` when you `launch()` the Interface!\n\n", "html": "

Image Classification with Vision Transformers

\n\n

Introduction

\n\n

Image classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from facial recognition to manufacturing quality control.

\n\n

State-of-the-art image classifiers are based on the transformers architectures, originally popularized for NLP tasks. Such architectures are typically called vision transformers (ViT). Such models are perfect to use with Gradio's image input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in a single line of Python, and it will look like this (try one of the examples!):

\n\n\n\n

Let's get started!

\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed.

\n\n

Step 1 \u2014 Choosing a Vision Image Classification Model

\n\n

First, we will need an image classification model. For this tutorial, we will use a model from the Hugging Face Model Hub. The Hub contains thousands of models covering dozens of different machine learning tasks.

\n\n

Expand the Tasks category on the left sidebar and select \"Image Classification\" as our task of interest. You will then see all of the models on the Hub that are designed to classify images.

\n\n

At the time of writing, the most popular one is google/vit-base-patch16-224, which has been trained on ImageNet images at a resolution of 224x224 pixels. We will use this model for our demo.

\n\n

Step 2 \u2014 Loading the Vision Transformer Model with Gradio

\n\n

When using a model from the Hugging Face Hub, we do not need to define the input or output components for the demo. Similarly, we do not need to be concerned with the details of preprocessing or postprocessing. \nAll of these are automatically inferred from the model tags.

\n\n

Besides the import statement, it only takes a single line of Python to load and launch the demo.

\n\n

We use the gr.Interface.load() method and pass in the path to the model, including the huggingface/ prefix, to designate that it is from the Hugging Face Hub.

\n\n
import gradio as gr\n\ngr.Interface.load(\n             \"huggingface/google/vit-base-patch16-224\",\n             examples=[\"alligator.jpg\", \"laptop.jpg\"]).launch()\n
\n\n

Notice that we have added one more parameter, the examples, which allows us to prepopulate our interfaces with a few predefined examples.

\n\n

This produces the following interface, which you can try right here in your browser. When you input an image, it is automatically preprocessed and sent to the Hugging Face Hub API, where it is passed through the model and returned as a human-interpretable prediction. Try uploading your own image!

\n\n\n\n
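As an aside that is not part of this guide, you can also call the same hosted model over plain HTTP. Here is a rough sketch, assuming the serverless Inference API endpoint and a personal Hugging Face access token (the token value is a placeholder):

```python
import requests

API_URL = "https://api-inference.huggingface.co/models/google/vit-base-patch16-224"
HF_TOKEN = "hf_..."  # placeholder: your own Hugging Face access token

# Send the raw image bytes; the API responds with a list of {"label": ..., "score": ...} dicts.
with open("alligator.jpg", "rb") as f:
    response = requests.post(
        API_URL,
        headers={"Authorization": f"Bearer {HF_TOKEN}"},
        data=f.read(),
    )

print(response.json())
```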
\n\n

And you're done! In one line of code, you have built a web demo for an image classifier. If you'd like to share with others, try setting share=True when you launch() the Interface!

\n", "tags": ["VISION", "TRANSFORMERS", "HUB"], "spaces": ["https://huggingface.co/spaces/abidlabs/vision-transformer"], "url": "/guides/image-classification-with-vision-transformers/", "contributor": null}]}, {"category": "Tabular Data Science And Plots", "guides": [{"name": "connecting-to-a-database", "category": "tabular-data-science-and-plots", "pretty_category": "Tabular Data Science And Plots", "guide_index": 1, "absolute_index": 23, "pretty_name": "Connecting To A Database", "content": "# Connecting to a Database\n\n\n\n\n## Introduction\n\nThis guide explains how you can use Gradio to connect your app to a database. We will be\nconnecting to a PostgreSQL database hosted on AWS but gradio is completely agnostic to the type of\ndatabase you are connecting to and where it's hosted. So as long as you can write python code to connect\nto your data, you can display it in a web UI with gradio \ud83d\udcaa\n\n## Overview \n \nWe will be analyzing bike share data from Chicago. The data is hosted on kaggle [here](https://www.kaggle.com/datasets/evangower/cyclistic-bike-share?select=202203-divvy-tripdata.csv).\nOur goal is to create a dashboard that will enable our business stakeholders to answer the following questions:\n\n1. Are electric bikes more popular than regular bikes?\n2. What are the top 5 most popular departure bike stations?\n\nAt the end of this guide, we will have a functioning application that looks like this:\n\n \n\n\n## Step 1 - Creating your database\n\nWe will be storing our data on a PostgreSQL hosted on Amazon's RDS service. Create an AWS account if you don't already have one\nand create a PostgreSQL database on the free tier. \n\n**Important**: If you plan to host this demo on HuggingFace Spaces, make sure database is on port **8080**. Spaces will\nblock all outgoing connections unless they are made to port 80, 443, or 8080 as noted [here](https://huggingface.co/docs/hub/spaces-overview#networking).\nRDS will not let you create a postgreSQL instance on ports 80 or 443.\n\nOnce your database is created, download the dataset from Kaggle and upload it to your database.\nFor the sake of this demo, we will only upload March 2022 data.\n\n\n## Step 2.a - Write your ETL code\nWe will be querying our database for the total count of rides split by the type of bicycle (electric, standard, or docked).\nWe will also query for the total count of rides that depart from each station and take the top 5. \n\nWe will then take the result of our queries and visualize them in with matplotlib.\n\nWe will use the pandas [read_sql](https://pandas.pydata.org/docs/reference/api/pandas.read_sql.html)\nmethod to connect to the database. This requires the `psycopg2` library to be installed. 
\n\nIn order to connect to our database, we will specify the database username, password, and host as environment variables.\nThis will make our app more secure by avoiding storing sensitive information as plain text in our application files.\n\n```python\nimport os\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nDB_USER = os.getenv(\"DB_USER\")\nDB_PASSWORD = os.getenv(\"DB_PASSWORD\")\nDB_HOST = os.getenv(\"DB_HOST\")\nPORT = 8080\nDB_NAME = \"bikeshare\"\n\nconnection_string = f\"postgresql://{DB_USER}:{DB_PASSWORD}@{DB_HOST}?port={PORT}&dbname={DB_NAME}\"\n\ndef get_count_ride_type():\n df = pd.read_sql(\n \"\"\"\n SELECT COUNT(ride_id) as n, rideable_type\n FROM rides\n GROUP BY rideable_type\n ORDER BY n DESC\n \"\"\",\n con=connection_string\n )\n fig_m, ax = plt.subplots()\n ax.bar(x=df['rideable_type'], height=df['n'])\n ax.set_title(\"Number of rides by bycycle type\")\n ax.set_ylabel(\"Number of Rides\")\n ax.set_xlabel(\"Bicycle Type\")\n return fig_m\n\n\ndef get_most_popular_stations():\n \n df = pd.read_sql(\n \"\"\"\n SELECT COUNT(ride_id) as n, MAX(start_station_name) as station\n FROM RIDES\n WHERE start_station_name is NOT NULL\n GROUP BY start_station_id\n ORDER BY n DESC\n LIMIT 5\n \"\"\",\n con=connection_string\n )\n fig_m, ax = plt.subplots()\n ax.bar(x=df['station'], height=df['n'])\n ax.set_title(\"Most popular stations\")\n ax.set_ylabel(\"Number of Rides\")\n ax.set_xlabel(\"Station Name\")\n ax.set_xticklabels(\n df['station'], rotation=45, ha=\"right\", rotation_mode=\"anchor\"\n )\n ax.tick_params(axis=\"x\", labelsize=8)\n fig_m.tight_layout()\n return fig_m\n```\n\nIf you were to run our script locally, you could pass in your credentials as environment variables like so\n\n```bash\nDB_USER='username' DB_PASSWORD='password' DB_HOST='host' python app.py\n```\n\n\n## Step 2.c - Write your gradio app\nWe will display or matplotlib plots in two separate `gr.Plot` components displayed side by side using `gr.Row()`.\nBecause we have wrapped our function to fetch the data in a `demo.load()` event trigger,\nour demo will fetch the latest data **dynamically** from the database each time the web page loads. \ud83e\ude84\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n with gr.Row():\n bike_type = gr.Plot()\n station = gr.Plot()\n\n demo.load(get_count_ride_type, inputs=None, outputs=bike_type)\n demo.load(get_most_popular_stations, inputs=None, outputs=station)\n\ndemo.launch()\n```\n\n## Step 3 - Deployment\nIf you run the code above, your app will start running locally.\nYou can even get a temporary shareable link by passing the `share=True` parameter to `launch`.\n\nBut what if you want to a permanent deployment solution?\nLet's deploy our Gradio app to the free HuggingFace Spaces platform.\n\nIf you haven't used Spaces before, follow the previous guide [here](/using_hugging_face_integrations).\nYou will have to add the `DB_USER`, `DB_PASSWORD`, and `DB_HOST` variables as \"Repo Secrets\". You can do this in the \"Settings\" tab.\n\n![secrets](https://github.com/gradio-app/gradio/blob/main/guides/assets/secrets.png?raw=true)\n\n## Conclusion\nCongratulations! You know how to connect your gradio app to a database hosted on the cloud! 
\u2601\ufe0f\n\nOur dashboard is now running on [Spaces](https://huggingface.co/spaces/gradio/chicago-bikeshare-dashboard).\nThe complete code is [here](https://huggingface.co/spaces/gradio/chicago-bikeshare-dashboard/blob/main/app.py)\n \nAs you can see, gradio gives you the power to connect to your data wherever it lives and display however you want! \ud83d\udd25", "html": "

Connecting to a Database

\n\n

Introduction

\n\n

This guide explains how you can use Gradio to connect your app to a database. We will be\nconnecting to a PostgreSQL database hosted on AWS but gradio is completely agnostic to the type of\ndatabase you are connecting to and where it's hosted. So as long as you can write python code to connect\nto your data, you can display it in a web UI with gradio \ud83d\udcaa

\n\n

Overview

\n\n

We will be analyzing bike share data from Chicago. The data is hosted on kaggle here.\nOur goal is to create a dashboard that will enable our business stakeholders to answer the following questions:

\n\n
    \n
  1. Are electric bikes more popular than regular bikes?
  2. \n
  3. What are the top 5 most popular departure bike stations?
  4. \n
\n\n

At the end of this guide, we will have a functioning application that looks like this:

\n\n

\n\n

Step 1 - Creating your database

\n\n

We will be storing our data on a PostgreSQL database hosted on Amazon's RDS service. Create an AWS account if you don't already have one\nand create a PostgreSQL database on the free tier.

\n\n

Important: If you plan to host this demo on HuggingFace Spaces, make sure the database is on port 8080. Spaces will\nblock all outgoing connections unless they are made to port 80, 443, or 8080 as noted here.\nRDS will not let you create a PostgreSQL instance on ports 80 or 443.

\n\n

Once your database is created, download the dataset from Kaggle and upload it to your database.\nFor the sake of this demo, we will only upload March 2022 data.

\n\n
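The guide does not show the upload step itself. One possible sketch, assuming the sqlalchemy and psycopg2 packages are installed and reusing the same credentials and rides table referenced later in this guide, is to load the Kaggle CSV with pandas and push it to the database:

```python
import os

import pandas as pd
from sqlalchemy import create_engine

# Credentials come from the same environment variables introduced in Step 2.a below.
engine = create_engine(
    f"postgresql://{os.getenv('DB_USER')}:{os.getenv('DB_PASSWORD')}"
    f"@{os.getenv('DB_HOST')}:8080/bikeshare"
)

# March 2022 trip data downloaded from the Kaggle dataset.
df = pd.read_csv("202203-divvy-tripdata.csv")

# Create (or replace) the "rides" table that the dashboard queries below.
df.to_sql("rides", engine, if_exists="replace", index=False)
```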

Step 2.a - Write your ETL code

\n\n

We will be querying our database for the total count of rides split by the type of bicycle (electric, standard, or docked).\nWe will also query for the total count of rides that depart from each station and take the top 5.

\n\n

We will then take the results of our queries and visualize them with matplotlib.

\n\n

We will use the pandas read_sql\nmethod to connect to the database. This requires the psycopg2 library to be installed.

\n\n

In order to connect to our database, we will specify the database username, password, and host as environment variables.\nThis will make our app more secure by avoiding storing sensitive information as plain text in our application files.

\n\n
import os\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nDB_USER = os.getenv(\"DB_USER\")\nDB_PASSWORD = os.getenv(\"DB_PASSWORD\")\nDB_HOST = os.getenv(\"DB_HOST\")\nPORT = 8080\nDB_NAME = \"bikeshare\"\n\nconnection_string = f\"postgresql://{DB_USER}:{DB_PASSWORD}@{DB_HOST}?port={PORT}&dbname={DB_NAME}\"\n\ndef get_count_ride_type():\n    df = pd.read_sql(\n    \"\"\"\n        SELECT COUNT(ride_id) as n, rideable_type\n        FROM rides\n        GROUP BY rideable_type\n        ORDER BY n DESC\n    \"\"\",\n    con=connection_string\n    )\n    fig_m, ax = plt.subplots()\n    ax.bar(x=df['rideable_type'], height=df['n'])\n    ax.set_title(\"Number of rides by bycycle type\")\n    ax.set_ylabel(\"Number of Rides\")\n    ax.set_xlabel(\"Bicycle Type\")\n    return fig_m\n\n\ndef get_most_popular_stations():\n\n    df = pd.read_sql(\n        \"\"\"\n    SELECT COUNT(ride_id) as n, MAX(start_station_name) as station\n    FROM RIDES\n    WHERE start_station_name is NOT NULL\n    GROUP BY start_station_id\n    ORDER BY n DESC\n    LIMIT 5\n    \"\"\",\n    con=connection_string\n    )\n    fig_m, ax = plt.subplots()\n    ax.bar(x=df['station'], height=df['n'])\n    ax.set_title(\"Most popular stations\")\n    ax.set_ylabel(\"Number of Rides\")\n    ax.set_xlabel(\"Station Name\")\n    ax.set_xticklabels(\n        df['station'], rotation=45, ha=\"right\", rotation_mode=\"anchor\"\n    )\n    ax.tick_params(axis=\"x\", labelsize=8)\n    fig_m.tight_layout()\n    return fig_m\n
\n\n

If you were to run our script locally, you could pass in your credentials as environment variables like so

\n\n
DB_USER='username' DB_PASSWORD='password' DB_HOST='host' python app.py\n
\n\n

Step 2.c - Write your gradio app

\n\n

We will display our matplotlib plots in two separate gr.Plot components displayed side by side using gr.Row().\nBecause we have wrapped our data-fetching functions in a demo.load() event trigger,\nour demo will fetch the latest data dynamically from the database each time the web page loads. \ud83e\ude84

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        bike_type = gr.Plot()\n        station = gr.Plot()\n\n    demo.load(get_count_ride_type, inputs=None, outputs=bike_type)\n    demo.load(get_most_popular_stations, inputs=None, outputs=station)\n\ndemo.launch()\n
\n\n

Step 3 - Deployment

\n\n

If you run the code above, your app will start running locally.\nYou can even get a temporary shareable link by passing the share=True parameter to launch.

\n\n

But what if you want a permanent deployment solution?\nLet's deploy our Gradio app to the free HuggingFace Spaces platform.

\n\n

If you haven't used Spaces before, follow the previous guide here.\nYou will have to add the DB_USER, DB_PASSWORD, and DB_HOST variables as \"Repo Secrets\". You can do this in the \"Settings\" tab.

\n\n

\"secrets\"

\n\n

Conclusion

\n\n

Congratulations! You know how to connect your gradio app to a database hosted on the cloud! \u2601\ufe0f

\n\n

Our dashboard is now running on Spaces.\nThe complete code is here.

\n\n

As you can see, gradio gives you the power to connect to your data wherever it lives and display it however you want! \ud83d\udd25

\n", "tags": ["TABULAR", "PLOTS "], "spaces": ["https://huggingface.co/spaces/gradio/chicago-bikeshare-dashboard"], "url": "/guides/connecting-to-a-database/", "contributor": null}, {"name": "creating-a-dashboard-from-bigquery-data", "category": "tabular-data-science-and-plots", "pretty_category": "Tabular Data Science And Plots", "guide_index": null, "absolute_index": 24, "pretty_name": "Creating A Dashboard From Bigquery Data", "content": "# Creating a Real-Time Dashboard from BigQuery Data\n\n\n\n\n[Google BigQuery](https://cloud.google.com/bigquery) is a cloud-based service for processing very large data sets. It is a serverless and highly scalable data warehousing solution that enables users to analyze data [using SQL-like queries](https://www.oreilly.com/library/view/google-bigquery-the/9781492044451/ch01.html).\n\nIn this tutorial, we will show you how to query a BigQuery dataset in Python and display the data in a dashboard that updates in real time using `gradio`. The dashboard will look like this:\n\n\n\nWe'll cover the following steps in this Guide:\n\n1. Setting up your BigQuery credentials\n2. Using the BigQuery client\n3. Building the real-time dashboard (in just *7 lines of Python*)\n\nWe'll be working with the [New York Times' COVID dataset](https://www.nytimes.com/interactive/2021/us/covid-cases.html) that is available as a public dataset on BigQuery. The dataset, named `covid19_nyt.us_counties` contains the latest information about the number of confirmed cases and deaths from COVID across US counties. \n\n**Prerequisites**: This Guide uses [Gradio Blocks](/guides/quickstart/#blocks-more-flexibility-and-control), so make your are familiar with the Blocks class. \n\n## Setting up your BigQuery Credentials\n\nTo use Gradio with BigQuery, you will need to obtain your BigQuery credentials and use them with the [BigQuery Python client](https://pypi.org/project/google-cloud-bigquery/). If you already have BigQuery credentials (as a `.json` file), you can skip this section. If not, you can do this for free in just a couple of minutes.\n\n1. First, log in to your Google Cloud account and go to the Google Cloud Console (https://console.cloud.google.com/)\n\n2. In the Cloud Console, click on the hamburger menu in the top-left corner and select \"APIs & Services\" from the menu. If you do not have an existing project, you will need to create one.\n\n3. Then, click the \"+ Enabled APIs & services\" button, which allows you to enable specific services for your project. Search for \"BigQuery API\", click on it, and click the \"Enable\" button. If you see the \"Manage\" button, then the BigQuery is already enabled, and you're all set. \n\n4. In the APIs & Services menu, click on the \"Credentials\" tab and then click on the \"Create credentials\" button.\n\n5. In the \"Create credentials\" dialog, select \"Service account key\" as the type of credentials to create, and give it a name. Also grant the service account permissions by giving it a role such as \"BigQuery User\", which will allow you to run queries.\n\n6. After selecting the service account, select the \"JSON\" key type and then click on the \"Create\" button. This will download the JSON key file containing your credentials to your computer. 
It will look something like this:\n\n```json\n{\n \"type\": \"service_account\",\n \"project_id\": \"your project\",\n \"private_key_id\": \"your private key id\",\n \"private_key\": \"private key\",\n \"client_email\": \"email\",\n \"client_id\": \"client id\",\n \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n \"token_uri\": \"https://accounts.google.com/o/oauth2/token\",\n \"auth_provider_x509_cert_url\": \"https://www.googleapis.com/oauth2/v1/certs\",\n \"client_x509_cert_url\": \"https://www.googleapis.com/robot/v1/metadata/x509/email_id\"\n}\n```\n\n## Using the BigQuery Client\n\nOnce you have the credentials, you will need to use the BigQuery Python client to authenticate using your credentials. To do this, you will need to install the BigQuery Python client by running the following command in the terminal:\n\n```bash\npip install google-cloud-bigquery[pandas]\n```\n\nYou'll notice that we've installed the pandas add-on, which will be helpful for processing the BigQuery dataset as a pandas dataframe. Once the client is installed, you can authenticate using your credentials by running the following code:\n\n```py\nfrom google.cloud import bigquery\n\nclient = bigquery.Client.from_service_account_json(\"path/to/key.json\")\n```\n\nWith your credentials authenticated, you can now use the BigQuery Python client to interact with your BigQuery datasets. \n\nHere is an example of a function which queries the `covid19_nyt.us_counties` dataset in BigQuery to show the top 20 counties with the most confirmed cases as of the current day:\n\n```py\nimport numpy as np\n\nQUERY = (\n 'SELECT * FROM `bigquery-public-data.covid19_nyt.us_counties` ' \n 'ORDER BY date DESC,confirmed_cases DESC '\n 'LIMIT 20')\n\ndef run_query():\n query_job = client.query(QUERY) \n query_result = query_job.result() \n df = query_result.to_dataframe()\n # Select a subset of columns \n df = df[[\"confirmed_cases\", \"deaths\", \"county\", \"state_name\"]]\n # Convert numeric columns to standard numpy types\n df = df.astype({\"deaths\": np.int64, \"confirmed_cases\": np.int64})\n return df\n```\n\n## Building the Real-Time Dashboard\n\nOnce you have a function to query the data, you can use the `gr.DataFrame` component from the Gradio library to display the results in a tabular format. This is a useful way to inspect the data and make sure that it has been queried correctly.\n\nHere is an example of how to use the `gr.DataFrame` component to display the results. By passing in the `run_query` function to `gr.DataFrame`, we instruct Gradio to run the function as soon as the page loads and show the results. In addition, you also pass in the keyword `every` to tell the dashboard to refresh every hour (60*60 seconds).\n\n```py\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n gr.DataFrame(run_query, every=60*60)\n\ndemo.queue().launch() # Run the demo using queuing\n```\n\nPerhaps you'd like to add a visualization to our dashboard. You can use the `gr.ScatterPlot()` component to visualize the data in a scatter plot. This allows you to see the relationship between different variables such as case count and case deaths in the dataset and can be useful for exploring the data and gaining insights. Again, we can do this in real-time\nby passing in the `every` parameter. 
\n\nHere is a complete example showing how to use the `gr.ScatterPlot` to visualize in addition to displaying data with the `gr.DataFrame`\n\n```py\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n gr.Markdown(\"# \ud83d\udc89 Covid Dashboard (Updated Hourly)\")\n with gr.Row():\n gr.DataFrame(run_query, every=60*60)\n gr.ScatterPlot(run_query, every=60*60, x=\"confirmed_cases\", \n y=\"deaths\", tooltip=\"county\", width=500, height=500)\n\ndemo.queue().launch() # Run the demo with queuing enabled\n```", "html": "

Creating a Real-Time Dashboard from BigQuery Data

\n\n

Google BigQuery is a cloud-based service for processing very large data sets. It is a serverless and highly scalable data warehousing solution that enables users to analyze data using SQL-like queries.

\n\n

In this tutorial, we will show you how to query a BigQuery dataset in Python and display the data in a dashboard that updates in real time using gradio. The dashboard will look like this:

\n\n

\n\n

We'll cover the following steps in this Guide:

\n\n
    \n
  1. Setting up your BigQuery credentials
  2. \n
  3. Using the BigQuery client
  4. \n
  5. Building the real-time dashboard (in just 7 lines of Python)
  6. \n
\n\n

We'll be working with the New York Times' COVID dataset that is available as a public dataset on BigQuery. The dataset, named covid19_nyt.us_counties, contains the latest information about the number of confirmed cases and deaths from COVID across US counties.

\n\n

Prerequisites: This Guide uses Gradio Blocks, so make sure you are familiar with the Blocks class.

\n\n

Setting up your BigQuery Credentials

\n\n

To use Gradio with BigQuery, you will need to obtain your BigQuery credentials and use them with the BigQuery Python client. If you already have BigQuery credentials (as a .json file), you can skip this section. If not, you can do this for free in just a couple of minutes.

\n\n
    \n
  1. First, log in to your Google Cloud account and go to the Google Cloud Console (https://console.cloud.google.com/)

  2. \n
  3. In the Cloud Console, click on the hamburger menu in the top-left corner and select \"APIs & Services\" from the menu. If you do not have an existing project, you will need to create one.

  4. \n
  3. Then, click the \"+ Enabled APIs & services\" button, which allows you to enable specific services for your project. Search for \"BigQuery API\", click on it, and click the \"Enable\" button. If you see the \"Manage\" button, then the BigQuery API is already enabled, and you're all set.

  6. \n
  7. In the APIs & Services menu, click on the \"Credentials\" tab and then click on the \"Create credentials\" button.

  8. \n
  9. In the \"Create credentials\" dialog, select \"Service account key\" as the type of credentials to create, and give it a name. Also grant the service account permissions by giving it a role such as \"BigQuery User\", which will allow you to run queries.

  10. \n
  11. After selecting the service account, select the \"JSON\" key type and then click on the \"Create\" button. This will download the JSON key file containing your credentials to your computer. It will look something like this:

  12. \n
\n\n
{\n \"type\": \"service_account\",\n \"project_id\": \"your project\",\n \"private_key_id\": \"your private key id\",\n \"private_key\": \"private key\",\n \"client_email\": \"email\",\n \"client_id\": \"client id\",\n \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n \"token_uri\": \"https://accounts.google.com/o/oauth2/token\",\n \"auth_provider_x509_cert_url\": \"https://www.googleapis.com/oauth2/v1/certs\",\n \"client_x509_cert_url\":  \"https://www.googleapis.com/robot/v1/metadata/x509/email_id\"\n}\n
\n\n

Using the BigQuery Client

\n\n

Once you have the credentials, you will need to use the BigQuery Python client to authenticate using your credentials. To do this, you will need to install the BigQuery Python client by running the following command in the terminal:

\n\n
pip install google-cloud-bigquery[pandas]\n
\n\n

You'll notice that we've installed the pandas add-on, which will be helpful for processing the BigQuery dataset as a pandas dataframe. Once the client is installed, you can authenticate using your credentials by running the following code:

\n\n
from google.cloud import bigquery\n\nclient = bigquery.Client.from_service_account_json(\"path/to/key.json\")\n
\n\n

With your credentials authenticated, you can now use the BigQuery Python client to interact with your BigQuery datasets.

\n\n

Here is an example of a function which queries the covid19_nyt.us_counties dataset in BigQuery to show the top 20 counties with the most confirmed cases as of the current day:

\n\n
import numpy as np\n\nQUERY = (\n    'SELECT * FROM `bigquery-public-data.covid19_nyt.us_counties` ' \n    'ORDER BY date DESC,confirmed_cases DESC '\n    'LIMIT 20')\n\ndef run_query():\n    query_job = client.query(QUERY)  \n    query_result = query_job.result()  \n    df = query_result.to_dataframe()\n    # Select a subset of columns \n    df = df[[\"confirmed_cases\", \"deaths\", \"county\", \"state_name\"]]\n    # Convert numeric columns to standard numpy types\n    df = df.astype({\"deaths\": np.int64, \"confirmed_cases\": np.int64})\n    return df\n
\n\n

Building the Real-Time Dashboard

\n\n

Once you have a function to query the data, you can use the gr.DataFrame component from the Gradio library to display the results in a tabular format. This is a useful way to inspect the data and make sure that it has been queried correctly.

\n\n

Here is an example of how to use the gr.DataFrame component to display the results. By passing in the run_query function to gr.DataFrame, we instruct Gradio to run the function as soon as the page loads and show the results. In addition, you also pass in the keyword every to tell the dashboard to refresh every hour (60*60 seconds).

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    gr.DataFrame(run_query, every=60*60)\n\ndemo.queue().launch()  # Run the demo using queuing\n
\n\n

Perhaps you'd like to add a visualization to our dashboard. You can use the gr.ScatterPlot() component to visualize the data in a scatter plot. This allows you to see the relationship between different variables such as case count and case deaths in the dataset and can be useful for exploring the data and gaining insights. Again, we can do this in real-time\nby passing in the every parameter.

\n\n

Here is a complete example showing how to use the gr.ScatterPlot to visualize the data in addition to displaying it with the gr.DataFrame:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    gr.Markdown(\"# \ud83d\udc89 Covid Dashboard (Updated Hourly)\")\n    with gr.Row():\n        gr.DataFrame(run_query, every=60*60)\n        gr.ScatterPlot(run_query, every=60*60, x=\"confirmed_cases\", \n                        y=\"deaths\", tooltip=\"county\", width=500, height=500)\n\ndemo.queue().launch()  # Run the demo with queuing enabled\n
\n", "tags": ["TABULAR", "DASHBOARD", "PLOTS "], "spaces": [], "url": "/guides/creating-a-dashboard-from-bigquery-data/", "contributor": null}, {"name": "creating-a-dashboard-from-supabase-data", "category": "tabular-data-science-and-plots", "pretty_category": "Tabular Data Science And Plots", "guide_index": null, "absolute_index": 25, "pretty_name": "Creating A Dashboard From Supabase Data", "content": "# Create a Dashboard from Supabase Data\n\n\n\n[Supabase](https://supabase.com/) is a cloud-based open-source backend that provides a PostgreSQL database, authentication, and other useful features for building web and mobile applications. In this tutorial, you will learn how to read data from Supabase and plot it in **real-time** on a Gradio Dashboard.\n\n**Prerequisites:** To start, you will need a free Supabase account, which you can sign up for here: [https://app.supabase.com/](https://app.supabase.com/)\n\nIn this end-to-end guide, you will learn how to:\n\n* Create tables in Supabase\n* Write data to Supabase using the Supabase Python Client\n* Visualize the data in a real-time dashboard using Gradio\n\nIf you already have data on Supabase that you'd like to visualize in a dashboard, you can skip the first two sections and go directly to [visualizing the data](#visualize-the-data-in-a-real-time-gradio-dashboard)!\n\n## Create a table in Supabase\n\nFirst of all, we need some data to visualize. Following this [excellent guide](https://supabase.com/blog/loading-data-supabase-python), we'll create fake commerce data and put it in Supabase. \n\n1\\. Start by creating a new project in Supabase. Once you're logged in, click the \"New Project\" button\n\n2\\. Give your project a name and database password. You can also choose a pricing plan (for our purposes, the Free Tier is sufficient!)\n\n3\\. You'll be presented with your API keys while the database spins up (can take up to 2 minutes). \n\n4\\. Click on \"Table Editor\" (the table icon) in the left pane to create a new table. We'll create a single table called `Product`, with the following schema:\n\n
| Column | Type |
| --- | --- |
| product_id | int8 |
| inventory_count | int8 |
| price | float8 |
| product_name | varchar |
\n\n5\\. Click Save to save the table schema. \n\n\nOur table is now ready!\n\n\n## Write data to Supabase\n\nThe next step is to write data to a Supabase dataset. We will use the Supabase Python library to do this. \n\n6\\. Install `supabase` by running the following command in your terminal:\n\n```bash\npip install supabase\n```\n\n7\\. Get your project URL and API key. Click the Settings (gear icon) on the left pane and click 'API'. The URL is listed in the Project URL box, while the API key is listed in Project API keys (with the tags `service_role`, `secret`)\n\n8\\. Now, run the following Python script to write some fake data to the table (note you have to put the values of `SUPABASE_URL` and `SUPABASE_SECRET_KEY` from step 7): \n\n```python\nimport supabase\n\n# Initialize the Supabase client\nclient = supabase.create_client('SUPABASE_URL', 'SUPABASE_SECRET_KEY')\n\n# Define the data to write\nimport random\n\nmain_list = []\nfor i in range(10):\n value = {'product_id': i, \n 'product_name': f\"Item {i}\",\n 'inventory_count': random.randint(1, 100), \n 'price': random.random()*100\n }\n main_list.append(value)\n\n# Write the data to the table\ndata = client.table('Product').insert(main_list).execute()\n```\n\nReturn to your Supabase dashboard and refresh the page, you should now see 10 rows populated in the `Product` table!\n\n## Visualize the Data in a Real-Time Gradio Dashboard\n\nFinally, we will read the data from the Supabase dataset using the same `supabase` Python library and create a realtime dashboard using `gradio`. \n\nNote: We repeat certain steps in this section (like creating the Supabase client) in case you did not go through the previous sections. As described in Step 7, you will need the project URL and API Key for your database.\n\n9\\. Write a function that loads the data from the `Product` table and returns it as a pandas Dataframe:\n\n\n```python\nimport supabase\nimport pandas as pd\n\nclient = supabase.create_client('SUPABASE_URL', 'SUPABASE_SECRET_KEY')\n\ndef read_data():\n response = client.table('Product').select(\"*\").execute()\n df = pd.DataFrame(response.data)\n return df\n```\n\n10\\. Create a small Gradio Dashboard with 2 Barplots that plots the prices and inventories of all of the items every minute and updates in real-time:\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as dashboard:\n with gr.Row():\n gr.BarPlot(read_data, x=\"product_id\", y=\"price\", title=\"Prices\", every=60)\n gr.BarPlot(read_data, x=\"product_id\", y=\"inventory_count\", title=\"Inventory\", every=60)\n\ndashboard.queue().launch()\n```\n\nNotice that by passing in a function to `gr.BarPlot()`, we have the BarPlot query the database as soon as the web app loads (and then again every 60 seconds because of the `every` parameter). Your final dashboard should look something like this:\n\n\n\n\n## Conclusion\n\nThat's it! In this tutorial, you learned how to write data to a Supabase dataset, and then read that data and plot the results as bar plots. If you update the data in the Supabase database, you'll notice that the Gradio dashboard will update within a minute. \n\nTry adding more plots and visualizations to this example (or with a different dataset) to build a more complex dashboard! ", "html": "

Create a Dashboard from Supabase Data

\n\n

Supabase is a cloud-based open-source backend that provides a PostgreSQL database, authentication, and other useful features for building web and mobile applications. In this tutorial, you will learn how to read data from Supabase and plot it in real-time on a Gradio Dashboard.

\n\n

Prerequisites: To start, you will need a free Supabase account, which you can sign up for here: https://app.supabase.com/

\n\n

In this end-to-end guide, you will learn how to:

\n\n
    \n
  • Create tables in Supabase
  • \n
  • Write data to Supabase using the Supabase Python Client
  • \n
  • Visualize the data in a real-time dashboard using Gradio
  • \n
\n\n

If you already have data on Supabase that you'd like to visualize in a dashboard, you can skip the first two sections and go directly to visualizing the data!

\n\n

Create a table in Supabase

\n\n

First of all, we need some data to visualize. Following this excellent guide, we'll create fake commerce data and put it in Supabase.

\n\n

1. Start by creating a new project in Supabase. Once you're logged in, click the \"New Project\" button

\n\n

2. Give your project a name and database password. You can also choose a pricing plan (for our purposes, the Free Tier is sufficient!)

\n\n

3. You'll be presented with your API keys while the database spins up (can take up to 2 minutes).

\n\n

4. Click on \"Table Editor\" (the table icon) in the left pane to create a new table. We'll create a single table called Product, with the following schema:

\n\n

\n\n\n\n\n\n\n
  • product_id: int8
  • inventory_count: int8
  • price: float8
  • product_name: varchar
\n\n

\n\n

5. Click Save to save the table schema.

\n\n

Our table is now ready!

\n\n

Write data to Supabase

\n\n

The next step is to write data to a Supabase dataset. We will use the Supabase Python library to do this.

\n\n

6. Install supabase by running the following command in your terminal:

\n\n
pip install supabase\n
\n\n

7. Get your project URL and API key. Click the Settings (gear icon) on the left pane and click 'API'. The URL is listed in the Project URL box, while the API key is listed in Project API keys (with the tags service_role, secret)

\n\n

8. Now, run the following Python script to write some fake data to the table (note you have to put the values of SUPABASE_URL and SUPABASE_SECRET_KEY from step 7):

\n\n
import supabase\n\n# Initialize the Supabase client\nclient = supabase.create_client('SUPABASE_URL', 'SUPABASE_SECRET_KEY')\n\n# Define the data to write\nimport random\n\nmain_list = []\nfor i in range(10):\n    value = {'product_id': i, \n             'product_name': f\"Item {i}\",\n             'inventory_count': random.randint(1, 100), \n             'price': random.random()*100\n            }\n    main_list.append(value)\n\n# Write the data to the table\ndata = client.table('Product').insert(main_list).execute()\n
\n\n
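If you would rather not hardcode the URL and key in the script above, a small sketch that reads them from environment variables instead (the variable names here are illustrative):

```python
import os

import supabase

# Hypothetical environment variable names; export them in your shell or add them as Space secrets.
client = supabase.create_client(
    os.environ["SUPABASE_URL"],
    os.environ["SUPABASE_SECRET_KEY"],
)
```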

Return to your Supabase dashboard and refresh the page; you should now see 10 rows populated in the Product table!

\n\n
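You can also sanity-check the insert from Python; a short sketch that reuses the client created in the script above:

```python
# Re-query the table and count the rows that were just inserted.
response = client.table('Product').select("*").execute()
print(f"Rows in Product: {len(response.data)}")
```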

Visualize the Data in a Real-Time Gradio Dashboard

\n\n

Finally, we will read the data from the Supabase dataset using the same supabase Python library and create a realtime dashboard using gradio.

\n\n

Note: We repeat certain steps in this section (like creating the Supabase client) in case you did not go through the previous sections. As described in Step 7, you will need the project URL and API Key for your database.

\n\n

9. Write a function that loads the data from the Product table and returns it as a pandas Dataframe:

\n\n
import supabase\nimport pandas as pd\n\nclient = supabase.create_client('SUPABASE_URL', 'SUPABASE_SECRET_KEY')\n\ndef read_data():\n    response = client.table('Product').select(\"*\").execute()\n    df = pd.DataFrame(response.data)\n    return df\n
\n\n

10. Create a small Gradio Dashboard with 2 BarPlots that plot the prices and inventories of all of the items and update in real time every minute:

\n\n
import gradio as gr\n\nwith gr.Blocks() as dashboard:\n    with gr.Row():\n        gr.BarPlot(read_data, x=\"product_id\", y=\"price\", title=\"Prices\", every=60)\n        gr.BarPlot(read_data, x=\"product_id\", y=\"inventory_count\", title=\"Inventory\", every=60)\n\ndashboard.queue().launch()\n
\n\n

Notice that by passing in a function to gr.BarPlot(), we have the BarPlot query the database as soon as the web app loads (and then again every 60 seconds because of the every parameter). Your final dashboard should look something like this:

\n\n

\n\n

Conclusion

\n\n

That's it! In this tutorial, you learned how to write data to a Supabase dataset, and then read that data and plot the results as bar plots. If you update the data in the Supabase database, you'll notice that the Gradio dashboard will update within a minute.

\n\n

Try adding more plots and visualizations to this example (or with a different dataset) to build a more complex dashboard!

\n", "tags": ["TABULAR", "DASHBOARD", "PLOTS "], "spaces": [], "url": "/guides/creating-a-dashboard-from-supabase-data/", "contributor": null}, {"name": "creating-a-realtime-dashboard-from-google-sheets", "category": "tabular-data-science-and-plots", "pretty_category": "Tabular Data Science And Plots", "guide_index": null, "absolute_index": 26, "pretty_name": "Creating A Realtime Dashboard From Google Sheets", "content": "# Creating a Real-Time Dashboard from Google Sheets\n\n\n\n[Google Sheets](https://www.google.com/sheets/about/) are an easy way to store tabular data in the form of spreadsheets. With Gradio and pandas, it's easy to read data from public or private Google Sheets and then display the data or plot it. In this blog post, we'll build a small *real-time* dashboard, one that updates when the data in the Google Sheets updates. \n\nBuilding the dashboard itself will just be 9 lines of Python code using Gradio, and our final dashboard will look like this:\n\n\n\n**Prerequisites**: This Guide uses [Gradio Blocks](/guides/quickstart/#blocks-more-flexibility-and-control), so make you are familiar with the Blocks class. \n\nThe process is a little different depending on if you are working with a publicly accessible or a private Google Sheet. We'll cover both, so let's get started!\n\n## Public Google Sheets\n\nBuilding a dashboard from a public Google Sheet is very easy, thanks to the [`pandas` library](https://pandas.pydata.org/):\n\n1\\. Get the URL of the Google Sheets that you want to use. To do this, simply go to the Google Sheets, click on the \"Share\" button in the top-right corner, and then click on the \"Get shareable link\" button. This will give you a URL that looks something like this:\n\n```html\nhttps://docs.google.com/spreadsheets/d/1UoKzzRzOCt-FXLLqDKLbryEKEgllGAQUEJ5qtmmQwpU/edit#gid=0\n```\n\n2\\. Now, let's modify this URL and then use it to read the data from the Google Sheets into a Pandas DataFrame. (In the code below, replace the `URL` variable with the URL of your public Google Sheet):\n\n```python\nimport pandas as pd\n\nURL = \"https://docs.google.com/spreadsheets/d/1UoKzzRzOCt-FXLLqDKLbryEKEgllGAQUEJ5qtmmQwpU/edit#gid=0\"\ncsv_url = URL.replace('/edit#gid=', '/export?format=csv&gid=')\n\ndef get_data():\n return pd.read_csv(csv_url)\n```\n\n3\\. The data query is a function, which means that it's easy to display it real-time using the the `gr.DataFrame` component, or plot it real-time using the `gr.LinePlot` component (of course, depending on the data, a different plot may be appropriate). To do this, just pass the function into the respective components, and set the `every` parameter based on how frequently (in seconds) you would like the component to refresh. Here's the Gradio code:\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n gr.Markdown(\"# \ud83d\udcc8 Real-Time Line Plot\")\n with gr.Row():\n with gr.Column():\n gr.DataFrame(get_data, every=5)\n with gr.Column():\n gr.LinePlot(get_data, every=5, x=\"Date\", y=\"Sales\", y_title=\"Sales ($ millions)\", overlay_point=True, width=500, height=500)\n\ndemo.queue().launch() # Run the demo with queuing enabled\n```\n \nAnd that's it! You have a dashboard that refreshes every 5 seconds, pulling the data from your Google Sheet.\n\n## Private Google Sheets\n\nFor private Google Sheets, the process requires a little more work, but not that much! 
The key difference is that now, you must authenticate yourself to authorize access to the private Google Sheets.\n\n### Authentication\n\nTo authenticate yourself, obtain credentials from Google Cloud. Here's [how to set up google cloud credentials](https://developers.google.com/workspace/guides/create-credentials):\n\n1\\. First, log in to your Google Cloud account and go to the Google Cloud Console (https://console.cloud.google.com/)\n\n2\\. In the Cloud Console, click on the hamburger menu in the top-left corner and select \"APIs & Services\" from the menu. If you do not have an existing project, you will need to create one.\n\n3\\. Then, click the \"+ Enabled APIs & services\" button, which allows you to enable specific services for your project. Search for \"Google Sheets API\", click on it, and click the \"Enable\" button. If you see the \"Manage\" button, then Google Sheets is already enabled, and you're all set. \n\n4\\. In the APIs & Services menu, click on the \"Credentials\" tab and then click on the \"Create credentials\" button.\n\n5\\. In the \"Create credentials\" dialog, select \"Service account key\" as the type of credentials to create, and give it a name. **Note down the email of the service account**\n\n6\\. After selecting the service account, select the \"JSON\" key type and then click on the \"Create\" button. This will download the JSON key file containing your credentials to your computer. It will look something like this:\n\n```json\n{\n \"type\": \"service_account\",\n \"project_id\": \"your project\",\n \"private_key_id\": \"your private key id\",\n \"private_key\": \"private key\",\n \"client_email\": \"email\",\n \"client_id\": \"client id\",\n \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n \"token_uri\": \"https://accounts.google.com/o/oauth2/token\",\n \"auth_provider_x509_cert_url\": \"https://www.googleapis.com/oauth2/v1/certs\",\n \"client_x509_cert_url\": \"https://www.googleapis.com/robot/v1/metadata/x509/email_id\"\n}\n```\n\n### Querying\n\nOnce you have the credentials `.json` file, you can use the following steps to query your Google Sheet:\n\n1\\. Click on the \"Share\" button in the top-right corner of the Google Sheet. Share the Google Sheets with the email address of the service from Step 5 of authentication subsection (this step is important!). Then click on the \"Get shareable link\" button. This will give you a URL that looks something like this:\n\n```html\nhttps://docs.google.com/spreadsheets/d/1UoKzzRzOCt-FXLLqDKLbryEKEgllGAQUEJ5qtmmQwpU/edit#gid=0\n```\n\n\n2\\. Install the [`gspread` library](https://docs.gspread.org/en/v5.7.0/), which makes it easy to work with the [Google Sheets API](https://developers.google.com/sheets/api/guides/concepts) in Python by running in the terminal: `pip install gspread`\n\n3\\. Write a function to load the data from the Google Sheet, like this (replace the `URL` variable with the URL of your private Google Sheet):\n\n```python\nimport gspread\nimport pandas as pd\n\n# Authenticate with Google and get the sheet\nURL = 'https://docs.google.com/spreadsheets/d/1_91Vps76SKOdDQ8cFxZQdgjTJiz23375sAT7vPvaj4k/edit#gid=0'\n\ngc = gspread.service_account(\"path/to/key.json\")\nsh = gc.open_by_url(URL)\nworksheet = sh.sheet1 \n\ndef get_data():\n values = worksheet.get_all_values()\n df = pd.DataFrame(values[1:], columns=values[0])\n return df\n\n```\n\n4\\. 
The data query is a function, which means that it's easy to display it real-time using the the `gr.DataFrame` component, or plot it real-time using the `gr.LinePlot` component (of course, depending on the data, a different plot may be appropriate). To do this, we just pass the function into the respective components, and set the `every` parameter based on how frequently (in seconds) we would like the component to refresh. Here's the Gradio code:\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n gr.Markdown(\"# \ud83d\udcc8 Real-Time Line Plot\")\n with gr.Row():\n with gr.Column():\n gr.DataFrame(get_data, every=5)\n with gr.Column():\n gr.LinePlot(get_data, every=5, x=\"Date\", y=\"Sales\", y_title=\"Sales ($ millions)\", overlay_point=True, width=500, height=500)\n\ndemo.queue().launch() # Run the demo with queuing enabled\n```\n \nYou now have a Dashboard that refreshes every 5 seconds, pulling the data from your Google Sheet.\n\n\n## Conclusion\n\nAnd that's all there is to it! With just a few lines of code, you can use `gradio` and other libraries to read data from a public or private Google Sheet and then display and plot the data in a real-time dashboard.\n\n\n\n", "html": "

Creating a Real-Time Dashboard from Google Sheets

\n\n

Google Sheets are an easy way to store tabular data in the form of spreadsheets. With Gradio and pandas, it's easy to read data from public or private Google Sheets and then display the data or plot it. In this blog post, we'll build a small real-time dashboard, one that updates when the data in the Google Sheets updates.

\n\n

Building the dashboard itself will just be 9 lines of Python code using Gradio, and our final dashboard will look like this:

\n\n

\n\n

Prerequisites: This Guide uses Gradio Blocks, so make sure you are familiar with the Blocks class.

\n\n

The process is a little different depending on whether you are working with a publicly accessible or a private Google Sheet. We'll cover both, so let's get started!

\n\n

Public Google Sheets

\n\n

Building a dashboard from a public Google Sheet is very easy, thanks to the pandas library:

\n\n

1. Get the URL of the Google Sheets that you want to use. To do this, simply go to the Google Sheets, click on the \"Share\" button in the top-right corner, and then click on the \"Get shareable link\" button. This will give you a URL that looks something like this:

\n\n
https://docs.google.com/spreadsheets/d/1UoKzzRzOCt-FXLLqDKLbryEKEgllGAQUEJ5qtmmQwpU/edit#gid=0\n
\n\n

2. Now, let's modify this URL and then use it to read the data from the Google Sheets into a Pandas DataFrame. (In the code below, replace the URL variable with the URL of your public Google Sheet):

\n\n
import pandas as pd\n\nURL = \"https://docs.google.com/spreadsheets/d/1UoKzzRzOCt-FXLLqDKLbryEKEgllGAQUEJ5qtmmQwpU/edit#gid=0\"\ncsv_url = URL.replace('/edit#gid=', '/export?format=csv&gid=')\n\ndef get_data():\n    return pd.read_csv(csv_url)\n
\n\n

3. The data query is a function, which means that it's easy to display it in real time using the gr.DataFrame component, or plot it in real time using the gr.LinePlot component (of course, depending on the data, a different plot may be appropriate). To do this, just pass the function into the respective components, and set the every parameter based on how frequently (in seconds) you would like the component to refresh. Here's the Gradio code:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    gr.Markdown(\"# \ud83d\udcc8 Real-Time Line Plot\")\n    with gr.Row():\n        with gr.Column():\n            gr.DataFrame(get_data, every=5)\n        with gr.Column():\n            gr.LinePlot(get_data, every=5, x=\"Date\", y=\"Sales\", y_title=\"Sales ($ millions)\", overlay_point=True, width=500, height=500)\n\ndemo.queue().launch()  # Run the demo with queuing enabled\n
\n\n

And that's it! You have a dashboard that refreshes every 5 seconds, pulling the data from your Google Sheet.

\n\n

Private Google Sheets

\n\n

For private Google Sheets, the process requires a little more work, but not that much! The key difference is that now, you must authenticate yourself to authorize access to the private Google Sheets.

\n\n

Authentication

\n\n

To authenticate yourself, obtain credentials from Google Cloud. Here's how to set up Google Cloud credentials:

\n\n

1. First, log in to your Google Cloud account and go to the Google Cloud Console (https://console.cloud.google.com/)

\n\n

2. In the Cloud Console, click on the hamburger menu in the top-left corner and select \"APIs & Services\" from the menu. If you do not have an existing project, you will need to create one.

\n\n

3. Then, click the \"+ Enabled APIs & services\" button, which allows you to enable specific services for your project. Search for \"Google Sheets API\", click on it, and click the \"Enable\" button. If you see the \"Manage\" button, then Google Sheets is already enabled, and you're all set.

\n\n

4. In the APIs & Services menu, click on the \"Credentials\" tab and then click on the \"Create credentials\" button.

\n\n

5. In the \"Create credentials\" dialog, select \"Service account key\" as the type of credentials to create, and give it a name. Note down the email of the service account

\n\n

6. After selecting the service account, select the \"JSON\" key type and then click on the \"Create\" button. This will download the JSON key file containing your credentials to your computer. It will look something like this:

\n\n
{\n \"type\": \"service_account\",\n \"project_id\": \"your project\",\n \"private_key_id\": \"your private key id\",\n \"private_key\": \"private key\",\n \"client_email\": \"email\",\n \"client_id\": \"client id\",\n \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n \"token_uri\": \"https://accounts.google.com/o/oauth2/token\",\n \"auth_provider_x509_cert_url\": \"https://www.googleapis.com/oauth2/v1/certs\",\n \"client_x509_cert_url\":  \"https://www.googleapis.com/robot/v1/metadata/x509/email_id\"\n}\n
\n\n

Querying

\n\n

Once you have the credentials .json file, you can use the following steps to query your Google Sheet:

\n\n

1. Click on the \"Share\" button in the top-right corner of the Google Sheet. Share the Google Sheets with the email address of the service from Step 5 of authentication subsection (this step is important!). Then click on the \"Get shareable link\" button. This will give you a URL that looks something like this:

\n\n
https://docs.google.com/spreadsheets/d/1UoKzzRzOCt-FXLLqDKLbryEKEgllGAQUEJ5qtmmQwpU/edit#gid=0\n
\n\n

2. Install the gspread library, which makes it easy to work with the Google Sheets API in Python, by running this in the terminal: pip install gspread

\n\n

3. Write a function to load the data from the Google Sheet, like this (replace the URL variable with the URL of your private Google Sheet):

\n\n
import gspread\nimport pandas as pd\n\n# Authenticate with Google and get the sheet\nURL = 'https://docs.google.com/spreadsheets/d/1_91Vps76SKOdDQ8cFxZQdgjTJiz23375sAT7vPvaj4k/edit#gid=0'\n\ngc = gspread.service_account(\"path/to/key.json\")\nsh = gc.open_by_url(URL)\nworksheet = sh.sheet1 \n\ndef get_data():\n    values = worksheet.get_all_values()\n    df = pd.DataFrame(values[1:], columns=values[0])\n    return df\n\n
\n\n

4. The data query is a function, which means that it's easy to display it in real time using the gr.DataFrame component, or plot it in real time using the gr.LinePlot component (of course, depending on the data, a different plot may be appropriate). To do this, we just pass the function into the respective components, and set the every parameter based on how frequently (in seconds) we would like the component to refresh. Here's the Gradio code:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    gr.Markdown(\"# \ud83d\udcc8 Real-Time Line Plot\")\n    with gr.Row():\n        with gr.Column():\n            gr.DataFrame(get_data, every=5)\n        with gr.Column():\n            gr.LinePlot(get_data, every=5, x=\"Date\", y=\"Sales\", y_title=\"Sales ($ millions)\", overlay_point=True, width=500, height=500)\n\ndemo.queue().launch()  # Run the demo with queuing enabled\n
\n\n

You now have a dashboard that refreshes every 5 seconds, pulling the data from your Google Sheet.

\n\n

Conclusion

\n\n

And that's all there is to it! With just a few lines of code, you can use gradio and other libraries to read data from a public or private Google Sheet and then display and plot the data in a real-time dashboard.

\n", "tags": ["TABULAR", "DASHBOARD", "PLOTS "], "spaces": [], "url": "/guides/creating-a-realtime-dashboard-from-google-sheets/", "contributor": null}, {"name": "plot-component-for-maps", "category": "tabular-data-science-and-plots", "pretty_category": "Tabular Data Science And Plots", "guide_index": null, "absolute_index": 27, "pretty_name": "Plot Component For Maps", "content": "# How to Use the Plot Component for Maps\n\n\n\n## Introduction\n\nThis guide explains how you can use Gradio to plot geographical data on a map using the `gradio.Plot` component. The Gradio `Plot` component works with Matplotlib, Bokeh and Plotly. Plotly is what we will be working with in this guide. Plotly allows developers to easily create all sorts of maps with their geographical data. Take a look [here](https://plotly.com/python/maps/) for some examples.\n\n## Overview\n\nWe will be using the New York City Airbnb dataset, which is hosted on kaggle [here](https://www.kaggle.com/datasets/dgomonov/new-york-city-airbnb-open-data). I've uploaded it to the Hugging Face Hub as a dataset [here](https://huggingface.co/datasets/gradio/NYC-Airbnb-Open-Data) for easier use and download. Using this data we will plot Airbnb locations on a map output and allow filtering based on price and location. Below is the demo that we will be building. \u26a1\ufe0f\n\n\n\n## Step 1 - Loading CSV data \ud83d\udcbe\n\nLet's start by loading the Airbnb NYC data from the Hugging Face Hub.\n\n```python\nfrom datasets import load_dataset\n\ndataset = load_dataset(\"gradio/NYC-Airbnb-Open-Data\", split=\"train\")\ndf = dataset.to_pandas()\n\ndef filter_map(min_price, max_price, boroughs):\n new_df = df[(df['neighbourhood_group'].isin(boroughs)) & \n (df['price'] > min_price) & (df['price'] < max_price)]\n names = new_df[\"name\"].tolist()\n prices = new_df[\"price\"].tolist()\n text_list = [(names[i], prices[i]) for i in range(0, len(names))]\n```\n\nIn the code above, we first load the csv data into a pandas dataframe. Let's begin by defining a function that we will use as the prediction function for the gradio app. This function will accept the minimum price and maximum price range as well as the list of boroughs to filter the resulting map. We can use the passed in values (`min_price`, `max_price`, and list of `boroughs`) to filter the dataframe and create `new_df`. Next we will create `text_list` of the names and prices of each Airbnb to use as labels on the map.\n\n## Step 2 - Map Figure \ud83c\udf10\n\nPlotly makes it easy to work with maps. Let's take a look below how we can create a map figure.\n\n```python\nimport plotly.graph_objects as go\n\nfig = go.Figure(go.Scattermapbox(\n customdata=text_list,\n lat=new_df['latitude'].tolist(),\n lon=new_df['longitude'].tolist(),\n mode='markers',\n marker=go.scattermapbox.Marker(\n size=6\n ),\n hoverinfo=\"text\",\n hovertemplate='Name: %{customdata[0]}
Price: $%{customdata[1]}'\n ))\n\nfig.update_layout(\n mapbox_style=\"open-street-map\",\n hovermode='closest',\n mapbox=dict(\n bearing=0,\n center=go.layout.mapbox.Center(\n lat=40.67,\n lon=-73.90\n ),\n pitch=0,\n zoom=9\n ),\n)\n```\n\nAbove, we create a scatter plot on mapbox by passing it our list of latitudes and longitudes to plot markers. We also pass in our custom data of names and prices for additional info to appear on every marker we hover over. Next we use `update_layout` to specify other map settings such as zoom, and centering.\n\nMore info [here](https://plotly.com/python/scattermapbox/) on scatter plots using Mapbox and Plotly.\n\n## Step 3 - Gradio App \u26a1\ufe0f\n\nWe will use two `gr.Number` components and a `gr.CheckboxGroup` to allow users of our app to specify price ranges and borough locations. We will then use the `gr.Plot` component as an output for our Plotly + Mapbox map we created earlier.\n\n```python\nwith gr.Blocks() as demo:\n with gr.Column():\n with gr.Row():\n min_price = gr.Number(value=250, label=\"Minimum Price\")\n max_price = gr.Number(value=1000, label=\"Maximum Price\")\n boroughs = gr.CheckboxGroup(choices=[\"Queens\", \"Brooklyn\", \"Manhattan\", \"Bronx\", \"Staten Island\"], value=[\"Queens\", \"Brooklyn\"], label=\"Select Boroughs:\")\n btn = gr.Button(value=\"Update Filter\")\n map = gr.Plot()\n demo.load(filter_map, [min_price, max_price, boroughs], map)\n btn.click(filter_map, [min_price, max_price, boroughs], map)\n```\n\nWe layout these components using the `gr.Column` and `gr.Row` and we'll also add event triggers for when the demo first loads and when our \"Update Filter\" button is clicked in order to trigger the map to update with our new filters.\n\nThis is what the full demo code looks like:\n\n```python\nimport gradio as gr\nimport plotly.graph_objects as go\nfrom datasets import load_dataset\n\ndataset = load_dataset(\"gradio/NYC-Airbnb-Open-Data\", split=\"train\")\ndf = dataset.to_pandas()\n\ndef filter_map(min_price, max_price, boroughs):\n\n filtered_df = df[(df['neighbourhood_group'].isin(boroughs)) & \n (df['price'] > min_price) & (df['price'] < max_price)]\n names = filtered_df[\"name\"].tolist()\n prices = filtered_df[\"price\"].tolist()\n text_list = [(names[i], prices[i]) for i in range(0, len(names))]\n fig = go.Figure(go.Scattermapbox(\n customdata=text_list,\n lat=filtered_df['latitude'].tolist(),\n lon=filtered_df['longitude'].tolist(),\n mode='markers',\n marker=go.scattermapbox.Marker(\n size=6\n ),\n hoverinfo=\"text\",\n hovertemplate='Name: %{customdata[0]}
Price: $%{customdata[1]}'\n ))\n\n fig.update_layout(\n mapbox_style=\"open-street-map\",\n hovermode='closest',\n mapbox=dict(\n bearing=0,\n center=go.layout.mapbox.Center(\n lat=40.67,\n lon=-73.90\n ),\n pitch=0,\n zoom=9\n ),\n )\n\n return fig\n\nwith gr.Blocks() as demo:\n with gr.Column():\n with gr.Row():\n min_price = gr.Number(value=250, label=\"Minimum Price\")\n max_price = gr.Number(value=1000, label=\"Maximum Price\")\n boroughs = gr.CheckboxGroup(choices=[\"Queens\", \"Brooklyn\", \"Manhattan\", \"Bronx\", \"Staten Island\"], value=[\"Queens\", \"Brooklyn\"], label=\"Select Boroughs:\")\n btn = gr.Button(value=\"Update Filter\")\n map = gr.Plot().style()\n demo.load(filter_map, [min_price, max_price, boroughs], map)\n btn.click(filter_map, [min_price, max_price, boroughs], map)\n\ndemo.launch()\n```\n\n## Step 4 - Deployment \ud83e\udd17\n\nIf you run the code above, your app will start running locally.\nYou can even get a temporary shareable link by passing the `share=True` parameter to `launch`.\n\nBut what if you want to a permanent deployment solution?\nLet's deploy our Gradio app to the free HuggingFace Spaces platform.\n\nIf you haven't used Spaces before, follow the previous guide [here](/using_hugging_face_integrations).\n\n## Conclusion \ud83c\udf89\n\nAnd you're all done! That's all the code you need to build a map demo.\n\nHere's a link to the demo [Map demo](https://huggingface.co/spaces/gradio/map_airbnb) and [complete code](https://huggingface.co/spaces/gradio/map_airbnb/blob/main/run.py) (on Hugging Face Spaces)\n", "html": "

How to Use the Plot Component for Maps

\n\n

Introduction

\n\n

This guide explains how you can use Gradio to plot geographical data on a map using the gradio.Plot component. The Gradio Plot component works with Matplotlib, Bokeh and Plotly. Plotly is what we will be working with in this guide. Plotly allows developers to easily create all sorts of maps with their geographical data. Take a look here for some examples.

\n\n

Overview

\n\n

We will be using the New York City Airbnb dataset, which is hosted on Kaggle here. I've uploaded it to the Hugging Face Hub as a dataset here for easier use and download. Using this data, we will plot Airbnb locations on a map output and allow filtering based on price and location. Below is the demo that we will be building. \u26a1\ufe0f

\n\n

\n\n

Step 1 - Loading CSV data \ud83d\udcbe

\n\n

Let's start by loading the Airbnb NYC data from the Hugging Face Hub.

\n\n
from datasets import load_dataset\n\ndataset = load_dataset(\"gradio/NYC-Airbnb-Open-Data\", split=\"train\")\ndf = dataset.to_pandas()\n\ndef filter_map(min_price, max_price, boroughs):\n    new_df = df[(df['neighbourhood_group'].isin(boroughs)) & \n            (df['price'] > min_price) & (df['price'] < max_price)]\n    names = new_df[\"name\"].tolist()\n    prices = new_df[\"price\"].tolist()\n    text_list = [(names[i], prices[i]) for i in range(0, len(names))]\n
\n\n

In the code above, we first load the CSV data into a pandas DataFrame. We then define a function that we will use as the prediction function for the Gradio app. This function accepts the minimum and maximum price range as well as the list of boroughs to filter the resulting map. We use the passed-in values (min_price, max_price, and the list of boroughs) to filter the DataFrame and create new_df. Next, we create text_list of the names and prices of each Airbnb to use as labels on the map.

\n\n

Step 2 - Map Figure \ud83c\udf10

\n\n

Plotly makes it easy to work with maps. Let's take a look below at how we can create a map figure.

\n\n
import plotly.graph_objects as go\n\nfig = go.Figure(go.Scattermapbox(\n            customdata=text_list,\n            lat=new_df['latitude'].tolist(),\n            lon=new_df['longitude'].tolist(),\n            mode='markers',\n            marker=go.scattermapbox.Marker(\n                size=6\n            ),\n            hoverinfo=\"text\",\n            hovertemplate='Name: %{customdata[0]}
Price: $%{customdata[1]}'\n ))\n\nfig.update_layout(\n mapbox_style=\"open-street-map\",\n hovermode='closest',\n mapbox=dict(\n bearing=0,\n center=go.layout.mapbox.Center(\n lat=40.67,\n lon=-73.90\n ),\n pitch=0,\n zoom=9\n ),\n)\n
\n\n

Above, we create a scatter plot on Mapbox by passing it our lists of latitudes and longitudes to plot markers. We also pass in our custom data of names and prices so that additional info appears on every marker we hover over. Next, we use update_layout to specify other map settings such as zoom and centering.

\n\n

More info here on scatter plots using Mapbox and Plotly.

\n\n

Step 3 - Gradio App \u26a1\ufe0f

\n\n

We will use two gr.Number components and a gr.CheckboxGroup to allow users of our app to specify price ranges and borough locations. We will then use the gr.Plot component as an output for our Plotly + Mapbox map we created earlier.

\n\n
with gr.Blocks() as demo:\n    with gr.Column():\n        with gr.Row():\n            min_price = gr.Number(value=250, label=\"Minimum Price\")\n            max_price = gr.Number(value=1000, label=\"Maximum Price\")\n        boroughs = gr.CheckboxGroup(choices=[\"Queens\", \"Brooklyn\", \"Manhattan\", \"Bronx\", \"Staten Island\"], value=[\"Queens\", \"Brooklyn\"], label=\"Select Boroughs:\")\n        btn = gr.Button(value=\"Update Filter\")\n        map = gr.Plot()\n    demo.load(filter_map, [min_price, max_price, boroughs], map)\n    btn.click(filter_map, [min_price, max_price, boroughs], map)\n
\n\n

We lay out these components using gr.Column and gr.Row, and we also add event triggers for when the demo first loads and when the \"Update Filter\" button is clicked, so that the map updates with the new filters.

\n\n

This is what the full demo code looks like:

\n\n
import gradio as gr\nimport plotly.graph_objects as go\nfrom datasets import load_dataset\n\ndataset = load_dataset(\"gradio/NYC-Airbnb-Open-Data\", split=\"train\")\ndf = dataset.to_pandas()\n\ndef filter_map(min_price, max_price, boroughs):\n\n    filtered_df = df[(df['neighbourhood_group'].isin(boroughs)) & \n          (df['price'] > min_price) & (df['price'] < max_price)]\n    names = filtered_df[\"name\"].tolist()\n    prices = filtered_df[\"price\"].tolist()\n    text_list = [(names[i], prices[i]) for i in range(0, len(names))]\n    fig = go.Figure(go.Scattermapbox(\n            customdata=text_list,\n            lat=filtered_df['latitude'].tolist(),\n            lon=filtered_df['longitude'].tolist(),\n            mode='markers',\n            marker=go.scattermapbox.Marker(\n                size=6\n            ),\n            hoverinfo=\"text\",\n            hovertemplate='Name: %{customdata[0]}
Price: $%{customdata[1]}'\n ))\n\n fig.update_layout(\n mapbox_style=\"open-street-map\",\n hovermode='closest',\n mapbox=dict(\n bearing=0,\n center=go.layout.mapbox.Center(\n lat=40.67,\n lon=-73.90\n ),\n pitch=0,\n zoom=9\n ),\n )\n\n return fig\n\nwith gr.Blocks() as demo:\n with gr.Column():\n with gr.Row():\n min_price = gr.Number(value=250, label=\"Minimum Price\")\n max_price = gr.Number(value=1000, label=\"Maximum Price\")\n boroughs = gr.CheckboxGroup(choices=[\"Queens\", \"Brooklyn\", \"Manhattan\", \"Bronx\", \"Staten Island\"], value=[\"Queens\", \"Brooklyn\"], label=\"Select Boroughs:\")\n btn = gr.Button(value=\"Update Filter\")\n map = gr.Plot().style()\n demo.load(filter_map, [min_price, max_price, boroughs], map)\n btn.click(filter_map, [min_price, max_price, boroughs], map)\n\ndemo.launch()\n
\n\n

Step 4 - Deployment \ud83e\udd17

\n\n

If you run the code above, your app will start running locally.\nYou can even get a temporary shareable link by passing the share=True parameter to launch.

\n\n
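For example, a minimal sketch (this assumes your Blocks app is assigned to a variable named demo, as in the code above):
\n\n
demo.launch(share=True)  # creates a temporary public *.gradio.live link\n
\n\n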

But what if you want a permanent deployment solution?\nLet's deploy our Gradio app to the free Hugging Face Spaces platform.

\n\n

If you haven't used Spaces before, follow the previous guide here.

\n\n

Conclusion \ud83c\udf89

\n\n

And you're all done! That's all the code you need to build a map demo.

\n\n

Here's a link to the Map demo and the complete code (on Hugging Face Spaces).

\n", "tags": ["PLOTS", "MAPS"], "spaces": [], "url": "/guides/plot-component-for-maps/", "contributor": null}, {"name": "using-gradio-for-tabular-workflows", "category": "tabular-data-science-and-plots", "pretty_category": "Tabular Data Science And Plots", "guide_index": null, "absolute_index": 28, "pretty_name": "Using Gradio For Tabular Workflows", "content": "# Using Gradio for Tabular Data Science Workflows\n\n\n\n\n## Introduction\n\nTabular data science is the most widely used domain of machine learning, with problems ranging from customer segmentation to churn prediction. Throughout various stages of the tabular data science workflow, communicating your work to stakeholders or clients can be cumbersome; which prevents data scientists from focusing on what matters, such as data analysis and model building. Data scientists can end up spending hours building a dashboard that takes in dataframe and returning plots, or returning a prediction or plot of clusters in a dataset. In this guide, we'll go through how to use `gradio` to improve your data science workflows. We will also talk about how to use `gradio` and [skops](https://skops.readthedocs.io/en/stable/) to build interfaces with only one line of code!\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started).\n\n## Let's Create a Simple Interface!\n\nWe will take a look at how we can create a simple UI that predicts failures based on product information. \n\n```python\nimport gradio as gr\nimport pandas as pd\nimport joblib\nimport datasets\n\n\ninputs = [gr.Dataframe(row_count = (2, \"dynamic\"), col_count=(4,\"dynamic\"), label=\"Input Data\", interactive=1)]\n\noutputs = [gr.Dataframe(row_count = (2, \"dynamic\"), col_count=(1, \"fixed\"), label=\"Predictions\", headers=[\"Failures\"])]\n\nmodel = joblib.load(\"model.pkl\")\n\n# we will give our dataframe as example\ndf = datasets.load_dataset(\"merve/supersoaker-failures\")\ndf = df[\"train\"].to_pandas()\n\ndef infer(input_dataframe):\n return pd.DataFrame(model.predict(input_dataframe))\n \ngr.Interface(fn = infer, inputs = inputs, outputs = outputs, examples = [[df.head(2)]]).launch()\n```\n\nLet's break down above code.\n\n* `fn`: the inference function that takes input dataframe and returns predictions.\n* `inputs`: the component we take our input with. We define our input as dataframe with 2 rows and 4 columns, which initially will look like an empty dataframe with the aforementioned shape. When the `row_count` is set to `dynamic`, you don't have to rely on the dataset you're inputting to pre-defined component.\n* `outputs`: The dataframe component that stores outputs. This UI can take single or multiple samples to infer, and returns 0 or 1 for each sample in one column, so we give `row_count` as 2 and `col_count` as 1 above. `headers` is a list made of header names for dataframe.\n* `examples`: You can either pass the input by dragging and dropping a CSV file, or a pandas DataFrame through examples, which headers will be automatically taken by the interface.\n\nWe will now create an example for a minimal data visualization dashboard. 
You can find a more comprehensive version in the related Spaces.\n\n\n\n```python\nimport gradio as gr\nimport pandas as pd\nimport datasets\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\ndf = datasets.load_dataset(\"merve/supersoaker-failures\")\ndf = df[\"train\"].to_pandas()\ndf.dropna(axis=0, inplace=True)\n\ndef plot(df):\n plt.scatter(df.measurement_13, df.measurement_15, c = df.loading,alpha=0.5)\n plt.savefig(\"scatter.png\")\n df['failure'].value_counts().plot(kind='bar')\n plt.savefig(\"bar.png\")\n sns.heatmap(df.select_dtypes(include=\"number\").corr())\n plt.savefig(\"corr.png\")\n plots = [\"corr.png\",\"scatter.png\", \"bar.png\"]\n return plots\n \ninputs = [gr.Dataframe(label=\"Supersoaker Production Data\")]\noutputs = [gr.Gallery(label=\"Profiling Dashboard\").style(grid=(1,3))]\n\ngr.Interface(plot, inputs=inputs, outputs=outputs, examples=[df.head(100)], title=\"Supersoaker Failures Analysis Dashboard\").launch()\n```\n\n\n\nWe will use the same dataset we used to train our model, but we will make a dashboard to visualize it this time. \n\n* `fn`: The function that will create plots based on data.\n* `inputs`: We use the same `Dataframe` component we used above.\n* `outputs`: The `Gallery` component is used to keep our visualizations.\n* `examples`: We will have the dataset itself as the example.\n\n## Easily load tabular data interfaces with one line of code using skops\n\n`skops` is a library built on top of `huggingface_hub` and `sklearn`. With the recent `gradio` integration of `skops`, you can build tabular data interfaces with one line of code!\n\n```python\nimport gradio as gr\n\n# title and description are optional\ntitle = \"Supersoaker Defective Product Prediction\"\ndescription = \"This model predicts Supersoaker production line failures. Drag and drop any slice from dataset or edit values as you wish in below dataframe component.\"\n\ngr.Interface.load(\"huggingface/scikit-learn/tabular-playground\", title=title, description=description).launch()\n```\n\n\n\n`sklearn` models pushed to Hugging Face Hub using `skops` include a `config.json` file that contains an example input with column names, the task being solved (that can either be `tabular-classification` or `tabular-regression`). From the task type, `gradio` constructs the `Interface` and consumes column names and the example input to build it. You can [refer to skops documentation on hosting models on Hub](https://skops.readthedocs.io/en/latest/auto_examples/plot_hf_hub.html#sphx-glr-auto-examples-plot-hf-hub-py) to learn how to push your models to Hub using `skops`.\n", "html": "

Using Gradio for Tabular Data Science Workflows

\n\n

Introduction

\n\n

Tabular data science is the most widely used domain of machine learning, with problems ranging from customer segmentation to churn prediction. Throughout various stages of the tabular data science workflow, communicating your work to stakeholders or clients can be cumbersome, which prevents data scientists from focusing on what matters, such as data analysis and model building. Data scientists can end up spending hours building a dashboard that takes in a dataframe and returns plots, a prediction, or a plot of clusters in a dataset. In this guide, we'll go through how to use gradio to improve your data science workflows. We will also talk about how to use gradio and skops to build interfaces with only one line of code!

\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed.

\n\n

Let's Create a Simple Interface!

\n\n

We will take a look at how we can create a simple UI that predicts failures based on product information.

\n\n
import gradio as gr\nimport pandas as pd\nimport joblib\nimport datasets\n\n\ninputs = [gr.Dataframe(row_count = (2, \"dynamic\"), col_count=(4,\"dynamic\"), label=\"Input Data\", interactive=1)]\n\noutputs = [gr.Dataframe(row_count = (2, \"dynamic\"), col_count=(1, \"fixed\"), label=\"Predictions\", headers=[\"Failures\"])]\n\nmodel = joblib.load(\"model.pkl\")\n\n# we will give our dataframe as example\ndf = datasets.load_dataset(\"merve/supersoaker-failures\")\ndf = df[\"train\"].to_pandas()\n\ndef infer(input_dataframe):\n  return pd.DataFrame(model.predict(input_dataframe))\n\ngr.Interface(fn = infer, inputs = inputs, outputs = outputs, examples = [[df.head(2)]]).launch()\n
\n\n

Let's break down the above code.

\n\n
    \n
  • fn: the inference function that takes the input dataframe and returns predictions.
  • \n
  • inputs: the component we take our input with. We define our input as a dataframe with 2 rows and 4 columns, which will initially look like an empty dataframe of that shape. When row_count is set to dynamic, the input dataframe doesn't have to match the component's pre-defined row count.
  • \n
  • outputs: The dataframe component that stores the outputs. This UI can take single or multiple samples to infer, and returns 0 or 1 for each sample in one column, so we give row_count as 2 and col_count as 1 above. headers is a list of header names for the dataframe.
  • \n
  • examples: You can either pass the input by dragging and dropping a CSV file, or provide a pandas DataFrame through examples; its headers will be picked up automatically by the interface.
  • \n
\n\n

We will now create an example for a minimal data visualization dashboard. You can find a more comprehensive version in the related Spaces.

\n\n

\n\n
import gradio as gr\nimport pandas as pd\nimport datasets\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\ndf = datasets.load_dataset(\"merve/supersoaker-failures\")\ndf = df[\"train\"].to_pandas()\ndf.dropna(axis=0, inplace=True)\n\ndef plot(df):\n  plt.scatter(df.measurement_13, df.measurement_15, c = df.loading,alpha=0.5)\n  plt.savefig(\"scatter.png\")\n  df['failure'].value_counts().plot(kind='bar')\n  plt.savefig(\"bar.png\")\n  sns.heatmap(df.select_dtypes(include=\"number\").corr())\n  plt.savefig(\"corr.png\")\n  plots = [\"corr.png\",\"scatter.png\", \"bar.png\"]\n  return plots\n\ninputs = [gr.Dataframe(label=\"Supersoaker Production Data\")]\noutputs = [gr.Gallery(label=\"Profiling Dashboard\").style(grid=(1,3))]\n\ngr.Interface(plot, inputs=inputs, outputs=outputs, examples=[df.head(100)], title=\"Supersoaker Failures Analysis Dashboard\").launch()\n
\n\n

\n\n

We will use the same dataset we used to train our model, but we will make a dashboard to visualize it this time.

\n\n
    \n
  • fn: The function that will create plots based on data.
  • \n
  • inputs: We use the same Dataframe component we used above.
  • \n
  • outputs: The Gallery component is used to keep our visualizations.
  • \n
  • examples: We will have the dataset itself as the example.
  • \n
\n\n

Easily load tabular data interfaces with one line of code using skops

\n\n

skops is a library built on top of huggingface_hub and sklearn. With the recent gradio integration of skops, you can build tabular data interfaces with one line of code!

\n\n
import gradio as gr\n\n# title and description are optional\ntitle = \"Supersoaker Defective Product Prediction\"\ndescription = \"This model predicts Supersoaker production line failures. Drag and drop any slice from dataset or edit values as you wish in below dataframe component.\"\n\ngr.Interface.load(\"huggingface/scikit-learn/tabular-playground\", title=title, description=description).launch()\n
\n\n

\n\n

sklearn models pushed to the Hugging Face Hub using skops include a config.json file that contains an example input with column names and the task being solved (either tabular-classification or tabular-regression). From the task type, gradio constructs the Interface and consumes the column names and the example input to build it. You can refer to the skops documentation on hosting models on the Hub to learn how to push your models to the Hub using skops.

\n", "tags": [], "spaces": ["https://huggingface.co/spaces/scikit-learn/gradio-skops-integration", "https://huggingface.co/spaces/scikit-learn/tabular-playground", "https://huggingface.co/spaces/merve/gradio-analysis-dashboard"], "url": "/guides/using-gradio-for-tabular-workflows/", "contributor": null}]}, {"category": "Client Libraries", "guides": [{"name": "getting-started-with-the-python-client", "category": "client-libraries", "pretty_category": "Client Libraries", "guide_index": 1, "absolute_index": 29, "pretty_name": "Getting Started With The Python Client", "content": "# Getting Started with the Gradio Python client \n\n\n\n\nThe Gradio Python client makes it very easy to use any Gradio app as an API. As an example, consider this [Hugging Face Space that transcribes audio files](https://huggingface.co/spaces/abidlabs/whisper) that are recorded from the microphone.\n\n![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/gradio-guides/whisper-screenshot.jpg)\n\nUsing the `gradio_client` library, we can easily use the Gradio as an API to transcribe audio files programmatically.\n\nHere's the entire code to do it:\n\n```python\nfrom gradio_client import Client\n\nclient = Client(\"abidlabs/whisper\") \nclient.predict(\"audio_sample.wav\") \n\n>> \"This is a test of the whisper speech recognition model.\"\n```\n\nThe Gradio client works with any hosted Gradio app, whether it be an image generator, a text summarizer, a stateful chatbot, a tax calculator, or anything else! The Gradio Client is mostly used with apps hosted on [Hugging Face Spaces](https://hf.space), but your app can be hosted anywhere, such as your own server.\n\n**Prerequisites**: To use the Gradio client, you do *not* need to know the `gradio` library in great detail. However, it is helpful to have general familiarity with Gradio's concepts of input and output components.\n\n## Installation\n\nIf you already have a recent version of `gradio`, then the `gradio_client` is included as a dependency. \n\nOtherwise, the lightweight `gradio_client` package can be installed from pip (or pip3) and is tested to work with Python versions 3.9 or higher:\n\n```bash\n$ pip install gradio_client\n```\n\n\n## Connecting to a running Gradio App\n\nStart by connecting instantiating a `Client` object and connecting it to a Gradio app that is running on Hugging Face Spaces or generally anywhere on the web.\n\n## Connecting to a Hugging Face Space\n\n```python\nfrom gradio_client import Client\n\nclient = Client(\"abidlabs/en2fr\") # a Space that translates from English to French\n```\n\nYou can also connect to private Spaces by passing in your HF token with the `hf_token` parameter. You can get your HF token here: https://huggingface.co/settings/tokens\n\n```python\nfrom gradio_client import Client\n\nclient = Client(\"abidlabs/my-private-space\", hf_token=\"...\") \n```\n\n\n## Duplicating a Space for private use\n\nWhile you can use any public Space as an API, you may get rate limited by Hugging Face if you make too many requests. For unlimited usage of a Space, simply duplicate the Space to create a private Space,\nand then use it to make as many requests as you'd like! 
\n\nThe `gradio_client` includes a class method: `Client.duplicate()` to make this process simple (you'll need to pass in your [Hugging Face token](https://huggingface.co/settings/tokens) or be logged in using the Hugging Face CLI):\n\n```python\nimport os\nfrom gradio_client import Client\n\nHF_TOKEN = os.environ.get(\"HF_TOKEN\")\n\nclient = Client.duplicate(\"abidlabs/whisper\", hf_token=HF_TOKEN) \nclient.predict(\"audio_sample.wav\") \n\n>> \"This is a test of the whisper speech recognition model.\"\n```\n\nIf you have previously duplicated a Space, re-running `duplicate()` will *not* create a new Space. Instead, the Client will attach to the previously-created Space. So it is safe to re-run the `Client.duplicate()` method multiple times. \n\n**Note:** if the original Space uses GPUs, your private Space will as well, and your Hugging Face account will get billed based on the price of the GPU. To minimize charges, your Space will automatically go to sleep after 1 hour of inactivity. You can also set the hardware using the `hardware` parameter of `duplicate()`.\n\n\n## Connecting a general Gradio app\n\nIf your app is running somewhere else, just provide the full URL instead, including the \"http://\" or \"https://\". Here's an example of making predictions to a Gradio app that is running on a share URL:\n\n```python\nfrom gradio_client import Client\n\nclient = Client(\"https://bec81a83-5b5c-471e.gradio.live\")\n```\n\n## Inspecting the API endpoints\n\nOnce you have connected to a Gradio app, you can view the APIs that are available to you by calling the `Client.view_api()` method. For the Whisper Space, we see the following:\n\n```bash\nClient.predict() Usage Info\n---------------------------\nNamed API endpoints: 1\n\n - predict(input_audio, api_name=\"/predict\") -> value_0\n Parameters:\n - [Audio] input_audio: str (filepath or URL)\n Returns:\n - [Textbox] value_0: str (value)\n```\n\nThis shows us that we have 1 API endpoint in this space, and shows us how to use the API endpoint to make a prediction: we should call the `.predict()` method (which we will explore below), providing a parameter `input_audio` of type `str`, which is a `filepath or URL`. \n\nWe should also provide the `api_name='/predict'` argument to the `predict()` method. Although this isn't necessary if a Gradio app has only 1 named endpoint, it does allow us to call different endpoints in a single app if they are available. If an app has unnamed API endpoints, these can also be displayed by running `.view_api(all_endpoints=True)`.\n\n\n## Making a prediction\n\nThe simplest way to make a prediction is simply to call the `.predict()` function with the appropriate arguments:\n\n```python\nfrom gradio_client import Client\n\nclient = Client(\"abidlabs/en2fr\", api_name='/predict')\nclient.predict(\"Hello\")\n\n>> Bonjour\n```\n\nIf there are multiple parameters, then you should pass them as separate arguments to `.predict()`, like this:\n\n\n```python\nfrom gradio_client import Client\n\nclient = Client(\"gradio/calculator\")\nclient.predict(4, \"add\", 5)\n\n>> 9.0\n```\n\nFor certain inputs, such as images, you should pass in the filepath or URL to the file. Likewise, for the corresponding output types, you will get a filepath or URL returned. \n\n```python\nfrom gradio_client import Client\n\nclient = Client(\"abidlabs/whisper\")\nclient.predict(\"https://audio-samples.github.io/samples/mp3/blizzard_unconditional/sample-0.mp3\")\n\n>> \"My thought I have nobody by a beauty and will as you poured. Mr. 
Rochester is serve in that so don't find simpus, and devoted abode, to at might in a r\u2014\"\n```\n\n\n## Running jobs asynchronously\n\nOe should note that `.predict()` is a *blocking* operation as it waits for the operation to complete before returning the prediction. \n\nIn many cases, you may be better off letting the job run in the background until you need the results of the prediction. You can do this by creating a `Job` instance using the `.submit()` method, and then later calling `.result()` on the job to get the result. For example:\n\n```python\nfrom gradio_client import Client\n\nclient = Client(space=\"abidlabs/en2fr\")\njob = client.submit(\"Hello\", api_name=\"/predict\") # This is not blocking\n\n# Do something else\n\njob.result() # This is blocking\n\n>> Bonjour\n```\n\n## Adding callbacks\n\nAlternatively, one can add one or more callbacks to perform actions after the job has completed running, like this:\n\n```python\nfrom gradio_client import Client\n\ndef print_result(x):\n print(\"The translated result is: {x}\")\n\nclient = Client(space=\"abidlabs/en2fr\")\n\njob = client.submit(\"Hello\", api_name=\"/predict\", result_callbacks=[print_result])\n\n# Do something else\n\n>> The translated result is: Bonjour\n\n```\n\n## Status\n\nThe `Job` object also allows you to get the status of the running job by calling the `.status()` method. This returns a `StatusUpdate` object with the following attributes: `code` (the status code, one of a set of defined strings representing the status. See the `utils.Status` class), `rank` (the current position of this job in the queue), `queue_size` (the total queue size), `eta` (estimated time this job will complete), `success` (a boolean representing whether the job completed successfully), and `time` (the time that the status was generated). \n\n```py\nfrom gradio_client import Client\n\nclient = Client(src=\"gradio/calculator\")\njob = client.submit(5, \"add\", 4, api_name=\"/predict\")\njob.status()\n\n>> \n```\n\n*Note*: The `Job` class also has a `.done()` instance method which returns a boolean indicating whether the job has completed.\n\n## Cancelling Jobs\n\nThe `Job` class also has a `.cancel()` instance method that cancels jobs that have been queued but not started. For example, if you run:\n\n```py\nclient = Client(\"abidlabs/whisper\") \njob1 = client.submit(\"audio_sample1.wav\") \njob2 = client.submit(\"audio_sample2.wav\") \njob1.cancel() # will return False, assuming the job has started\njob2.cancel() # will return True, indicating that the job has been canceled\n```\n\nIf the first job has started processing, then it will not be canceled. If the second job\nhas not yet started, it will be successfully canceled and removed from the queue. \n\n\n## Generator Endpoints\n\nSome Gradio API endpoints do not return a single value, rather they return a series of values. You can get the series of values that have been returned at any time from such a generator endpoint by running `job.outputs()`:\n\n```py\nfrom gradio_client import Client\n\nclient = Client(src=\"gradio/count_generator\")\njob = client.submit(3, api_name=\"/count\")\nwhile not job.done():\n time.sleep(0.1)\njob.outputs()\n\n>> ['0', '1', '2']\n```\n\nNote that running `job.result()` on a generator endpoint only gives you the *first* value returned by the endpoint. \n\nThe `Job` object is also iterable, which means you can use it to display the results of a generator function as they are returned from the endpoint. 
Here's the equivalent example using the `Job` as a generator:\n\n```py\nfrom gradio_client import Client\n\nclient = Client(src=\"gradio/count_generator\")\njob = client.submit(3, api_name=\"/count\")\n\nfor o in job:\n print(o)\n\n>> 0\n>> 1\n>> 2\n```\n\nYou can also cancel jobs that that have iterative outputs, in which case the job will finish as soon as the current iteration finishes running.\n\n```py\nfrom gradio_client import Client\nimport time\n\nclient = Client(\"abidlabs/test-yield\")\njob = client.submit(\"abcdef\")\ntime.sleep(3)\njob.cancel() # job cancels after 2 iterations\n```", "html": "

Getting Started with the Gradio Python client

\n\n

The Gradio Python client makes it very easy to use any Gradio app as an API. As an example, consider this Hugging Face Space that transcribes audio files that are recorded from the microphone.

\n\n

\"\"

\n\n

Using the gradio_client library, we can easily use the Gradio app as an API to transcribe audio files programmatically.

\n\n

Here's the entire code to do it:

\n\n
from gradio_client import Client\n\nclient = Client(\"abidlabs/whisper\") \nclient.predict(\"audio_sample.wav\")  \n\n>> \"This is a test of the whisper speech recognition model.\"\n
\n\n

The Gradio client works with any hosted Gradio app, whether it be an image generator, a text summarizer, a stateful chatbot, a tax calculator, or anything else! The Gradio Client is mostly used with apps hosted on Hugging Face Spaces, but your app can be hosted anywhere, such as your own server.

\n\n

Prerequisites: To use the Gradio client, you do not need to know the gradio library in great detail. However, it is helpful to have general familiarity with Gradio's concepts of input and output components.

\n\n

Installation

\n\n

If you already have a recent version of gradio, then the gradio_client is included as a dependency.

\n\n

Otherwise, the lightweight gradio_client package can be installed from pip (or pip3) and is tested to work with Python versions 3.9 or higher:

\n\n
$ pip install gradio_client\n
\n\n

Connecting to a running Gradio App

\n\n

Start by instantiating a Client object and connecting it to a Gradio app that is running on Hugging Face Spaces or anywhere else on the web.

\n\n

Connecting to a Hugging Face Space

\n\n
from gradio_client import Client\n\nclient = Client(\"abidlabs/en2fr\")  # a Space that translates from English to French\n
\n\n

You can also connect to private Spaces by passing in your HF token with the hf_token parameter. You can get your HF token here: https://huggingface.co/settings/tokens

\n\n
from gradio_client import Client\n\nclient = Client(\"abidlabs/my-private-space\", hf_token=\"...\") \n
\n\n

Duplicating a Space for private use

\n\n

While you can use any public Space as an API, you may get rate limited by Hugging Face if you make too many requests. For unlimited usage of a Space, simply duplicate the Space to create a private Space,\nand then use it to make as many requests as you'd like!

\n\n

The gradio_client includes a class method: Client.duplicate() to make this process simple (you'll need to pass in your Hugging Face token or be logged in using the Hugging Face CLI):

\n\n
import os\nfrom gradio_client import Client\n\nHF_TOKEN = os.environ.get(\"HF_TOKEN\")\n\nclient = Client.duplicate(\"abidlabs/whisper\", hf_token=HF_TOKEN) \nclient.predict(\"audio_sample.wav\")  \n\n>> \"This is a test of the whisper speech recognition model.\"\n
\n\n

If you have previously duplicated a Space, re-running duplicate() will not create a new Space. Instead, the Client will attach to the previously-created Space. So it is safe to re-run the Client.duplicate() method multiple times.

\n\n

Note: if the original Space uses GPUs, your private Space will as well, and your Hugging Face account will get billed based on the price of the GPU. To minimize charges, your Space will automatically go to sleep after 1 hour of inactivity. You can also set the hardware using the hardware parameter of duplicate().

\n\n
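For example, here is a minimal sketch of setting the hardware when duplicating; the hardware name \"cpu-upgrade\" is only an illustrative assumption, so substitute whichever Spaces hardware tier you actually need:
\n\n
from gradio_client import Client\n\n# Duplicate the Space onto specific hardware (the hardware string below is illustrative)\nclient = Client.duplicate(\"abidlabs/whisper\", hf_token=\"hf_...\", hardware=\"cpu-upgrade\")\n
\n\n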

Connecting a general Gradio app

\n\n

If your app is running somewhere else, just provide the full URL instead, including the \"http://\" or \"https://\". Here's an example of making predictions to a Gradio app that is running on a share URL:

\n\n
from gradio_client import Client\n\nclient = Client(\"https://bec81a83-5b5c-471e.gradio.live\")\n
\n\n

Inspecting the API endpoints

\n\n

Once you have connected to a Gradio app, you can view the APIs that are available to you by calling the Client.view_api() method. For the Whisper Space, we see the following:

\n\n
Client.predict() Usage Info\n---------------------------\nNamed API endpoints: 1\n\n - predict(input_audio, api_name=\"/predict\") -> value_0\n    Parameters:\n     - [Audio] input_audio: str (filepath or URL)\n    Returns:\n     - [Textbox] value_0: str (value)\n
\n\n

This shows us that we have 1 API endpoint in this space, and shows us how to use the API endpoint to make a prediction: we should call the .predict() method (which we will explore below), providing a parameter input_audio of type str, which is a filepath or URL.

\n\n

We should also provide the api_name='/predict' argument to the predict() method. Although this isn't necessary if a Gradio app has only 1 named endpoint, it does allow us to call different endpoints in a single app if they are available. If an app has unnamed API endpoints, these can also be displayed by running .view_api(all_endpoints=True).

\n\n
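For example, continuing with the Whisper client from above, a quick sketch of listing every endpoint, named and unnamed:
\n\n
client.view_api(all_endpoints=True)  # prints both named and unnamed API endpoints\n
\n\n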

Making a prediction

\n\n

The simplest way to make a prediction is to call the .predict() function with the appropriate arguments:

\n\n
from gradio_client import Client\n\nclient = Client(\"abidlabs/en2fr\")\nclient.predict(\"Hello\", api_name='/predict')\n\n>> Bonjour\n
\n\n

If there are multiple parameters, then you should pass them as separate arguments to .predict(), like this:

\n\n
from gradio_client import Client\n\nclient = Client(\"gradio/calculator\")\nclient.predict(4, \"add\", 5)\n\n>> 9.0\n
\n\n

For certain inputs, such as images, you should pass in the filepath or URL to the file. Likewise, for the corresponding output types, you will get a filepath or URL returned.

\n\n
from gradio_client import Client\n\nclient = Client(\"abidlabs/whisper\")\nclient.predict(\"https://audio-samples.github.io/samples/mp3/blizzard_unconditional/sample-0.mp3\")\n\n>> \"My thought I have nobody by a beauty and will as you poured. Mr. Rochester is serve in that so don't find simpus, and devoted abode, to at might in a r\u2014\"\n
\n\n

Running jobs asynchronously

\n\n

One should note that .predict() is a blocking operation, as it waits for the operation to complete before returning the prediction.

\n\n

In many cases, you may be better off letting the job run in the background until you need the results of the prediction. You can do this by creating a Job instance using the .submit() method, and then later calling .result() on the job to get the result. For example:

\n\n
from gradio_client import Client\n\nclient = Client(space=\"abidlabs/en2fr\")\njob = client.submit(\"Hello\", api_name=\"/predict\")  # This is not blocking\n\n# Do something else\n\njob.result()  # This is blocking\n\n>> Bonjour\n
\n\n

Adding callbacks

\n\n

Alternatively, one can add one or more callbacks to perform actions after the job has completed running, like this:

\n\n
from gradio_client import Client\n\ndef print_result(x):\n    print(f\"The translated result is: {x}\")\n\nclient = Client(space=\"abidlabs/en2fr\")\n\njob = client.submit(\"Hello\", api_name=\"/predict\", result_callbacks=[print_result])\n\n# Do something else\n\n>> The translated result is: Bonjour\n\n
\n\n

Status

\n\n

The Job object also allows you to get the status of the running job by calling the .status() method. This returns a StatusUpdate object with the following attributes: code (the status code, one of a set of defined strings representing the status. See the utils.Status class), rank (the current position of this job in the queue), queue_size (the total queue size), eta (estimated time this job will complete), success (a boolean representing whether the job completed successfully), and time (the time that the status was generated).

\n\n
from gradio_client import Client\n\nclient = Client(src=\"gradio/calculator\")\njob = client.submit(5, \"add\", 4, api_name=\"/predict\")\njob.status()\n\n>> \n
\n\n

Note: The Job class also has a .done() instance method which returns a boolean indicating whether the job has completed.

\n\n
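For example, a quick sketch using the job from above:
\n\n
job.done()  # returns a boolean indicating whether the job has completed\n
\n\n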

Cancelling Jobs

\n\n

The Job class also has a .cancel() instance method that cancels jobs that have been queued but not started. For example, if you run:

\n\n
client = Client(\"abidlabs/whisper\") \njob1 = client.submit(\"audio_sample1.wav\")  \njob2 = client.submit(\"audio_sample2.wav\")  \njob1.cancel()  # will return False, assuming the job has started\njob2.cancel()  # will return True, indicating that the job has been canceled\n
\n\n

If the first job has started processing, then it will not be canceled. If the second job\nhas not yet started, it will be successfully canceled and removed from the queue.

\n\n

Generator Endpoints

\n\n

Some Gradio API endpoints do not return a single value, rather they return a series of values. You can get the series of values that have been returned at any time from such a generator endpoint by running job.outputs():

\n\n
import time\n\nfrom gradio_client import Client\n\nclient = Client(src=\"gradio/count_generator\")\njob = client.submit(3, api_name=\"/count\")\nwhile not job.done():\n    time.sleep(0.1)\njob.outputs()\n\n>> ['0', '1', '2']\n
\n\n

Note that running job.result() on a generator endpoint only gives you the first value returned by the endpoint.

\n\n
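For example, continuing the count_generator sketch above, job.result() would return only the first yielded value:
\n\n
job.result()  # returns '0', the first value yielded by the endpoint\n
\n\n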

The Job object is also iterable, which means you can use it to display the results of a generator function as they are returned from the endpoint. Here's the equivalent example using the Job as a generator:

\n\n
from gradio_client import Client\n\nclient = Client(src=\"gradio/count_generator\")\njob = client.submit(3, api_name=\"/count\")\n\nfor o in job:\n    print(o)\n\n>> 0\n>> 1\n>> 2\n
\n\n

You can also cancel jobs that have iterative outputs, in which case the job will finish as soon as the current iteration finishes running.

\n\n
from gradio_client import Client\nimport time\n\nclient = Client(\"abidlabs/test-yield\")\njob = client.submit(\"abcdef\")\ntime.sleep(3)\njob.cancel()  # job cancels after 2 iterations\n
\n", "tags": ["CLIENT", "API", "SPACES"], "spaces": [], "url": "/guides/getting-started-with-the-python-client/", "contributor": null}, {"name": "getting-started-with-the-js-client", "category": "client-libraries", "pretty_category": "Client Libraries", "guide_index": 2, "absolute_index": 30, "pretty_name": "Getting Started With The Js Client", "content": "# Getting Started with the Gradio JavaScript client\n\n\n\nThe Gradio JavaScript client makes it very easy to use any Gradio app as an API. As an example, consider this [Hugging Face Space that transcribes audio files](https://huggingface.co/spaces/abidlabs/whisper) that are recorded from the microphone.\n\n![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/gradio-guides/whisper-screenshot.jpg)\n\nUsing the `@gradio/client` library, we can easily use the Gradio as an API to transcribe audio files programmatically.\n\nHere's the entire code to do it:\n\n```js\nimport { client } from \"@gradio/client\";\n\nconst response = await fetch(\n \"https://github.com/audio-samples/audio-samples.github.io/raw/master/samples/wav/ted_speakers/SalmanKhan/sample-1.wav\"\n);\nconst audio_file = await response.blob();\n\nconst app = await client(\"abidlabs/whisper\");\nconst transcription = await app.predict(\"/predict\", [audio_file]);\n\nconsole.log(transcription.data);\n// [ \"I said the same phrase 30 times.\" ]\n```\n\nThe Gradio client works with any hosted Gradio app, whether it be an image generator, a text summarizer, a stateful chatbot, a tax calculator, or anything else! The Gradio Client is mostly used with apps hosted on [Hugging Face Spaces](https://hf.space), but your app can be hosted anywhere, such as your own server.\n\n**Prequisites**: To use the Gradio client, you do _not_ need to know the `gradio` library in great detail. However, it is helpful to have general familiarity with Gradio's concepts of input and output components.\n\n## Installation\n\nThe lightweight `@gradio/client` package can be installed from the npm registry with a package manager of your choice and support node version 18 and above:\n\n```bash\nnpm i @gradio/client\n```\n\n## Connecting to a running Gradio App\n\nStart by connecting instantiating a `client` instance and connecting it to a Gradio app that is running on Hugging Face Spaces or generally anywhere on the web.\n\n## Connecting to a Hugging Face Space\n\n```js\nimport { client } from \"@gradio/client\";\n\nconst app = client(\"abidlabs/en2fr\"); // a Space that translates from English to French\n```\n\nYou can also connect to private Spaces by passing in your HF token with the `hf_token` property of the options parameter. You can get your HF token here: https://huggingface.co/settings/tokens\n\n```js\nimport { client } from \"@gradio/client\";\n\nconst app = client(\"abidlabs/my-private-space\", { hf_token=\"hf_...\" })\n```\n\n## Duplicating a Space for private use\n\nWhile you can use any public Space as an API, you may get rate limited by Hugging Face if you make too many requests. 
For unlimited usage of a Space, simply duplicate the Space to create a private Space, and then use it to make as many requests as you'd like!\n\nThe `@gradio/client` exports another function, `duplicate`, to make this process simple (you'll need to pass in your [Hugging Face token](https://huggingface.co/settings/tokens)).\n\n`duplicate` is almost identical to `client`, the only difference is under the hood:\n\n```js\nimport { client } from \"@gradio/client\";\n\nconst response = await fetch(\n \"https://audio-samples.github.io/samples/mp3/blizzard_unconditional/sample-0.mp3\"\n);\nconst audio_file = await response.blob();\n\nconst app = await duplicate(\"abidlabs/whisper\", { hf_token: \"hf_...\" });\nconst transcription = app.predict(\"/predict\", [audio_file]);\n```\n\nIf you have previously duplicated a Space, re-running `duplicate` will _not_ create a new Space. Instead, the client will attach to the previously-created Space. So it is safe to re-run the `duplicate` method multiple times with the same space.\n\n**Note:** if the original Space uses GPUs, your private Space will as well, and your Hugging Face account will get billed based on the price of the GPU. To minimize charges, your Space will automatically go to sleep after 5 minutes of inactivity. You can also set the hardware using the `hardware` and `timeout` properties of `duplicate`'s options object like this:\n\n```js\nimport { client } from \"@gradio/client\";\n\nconst app = await duplicate(\"abidlabs/whisper\", {\n hf_token: \"hf_...\",\n timeout: 60,\n hardware: \"a10g-small\",\n});\n```\n\n## Connecting a general Gradio app\n\nIf your app is running somewhere else, just provide the full URL instead, including the \"http://\" or \"https://\". Here's an example of making predictions to a Gradio app that is running on a share URL:\n\n```js\nimport { client } from \"@gradio/client\";\n\nconst app = client(\"https://bec81a83-5b5c-471e.gradio.live\");\n```\n\n## Inspecting the API endpoints\n\nOnce you have connected to a Gradio app, you can view the APIs that are available to you by calling the `client`'s `view_api` method.\n\nFor the Whisper Space, we can do this:\n\n```js\nimport { client } from \"@gradio/client\";\n\nconst app = await client(\"abidlabs/whisper\");\n\nconst app_info = await app.view_info();\n\nconsole.log(app_info);\n```\n\nAnd we will see the following:\n\n```json\n{\n \"named_endpoints\": {\n \"/predict\": {\n \"parameters\": [\n {\n \"label\": \"text\",\n \"component\": \"Textbox\",\n \"type\": \"string\"\n }\n ],\n \"returns\": [\n {\n \"label\": \"output\",\n \"component\": \"Textbox\",\n \"type\": \"string\"\n }\n ]\n }\n },\n \"unnamed_endpoints\": {}\n}\n```\n\nThis shows us that we have 1 API endpoint in this space, and shows us how to use the API endpoint to make a prediction: we should call the `.predict()` method (which we will explore below), providing a parameter `input_audio` of type `string`, which is a url to a file.\n\nWe should also provide the `api_name='/predict'` argument to the `predict()` method. Although this isn't necessary if a Gradio app has only 1 named endpoint, it does allow us to call different endpoints in a single app if they are available. 
If an app has unnamed API endpoints, these can also be displayed by running `.view_api(all_endpoints=True)`.\n\n## Making a prediction\n\nThe simplest way to make a prediction is simply to call the `.predict()` method with the appropriate arguments:\n\n```js\nimport { client } from \"@gradio/client\";\n\nconst app = await client(\"abidlabs/en2fr\");\nconst result = await app.predict(\"/predict\", [\"Hello\"]);\n```\n\nIf there are multiple parameters, then you should pass them as an array to `.predict()`, like this:\n\n```js\nimport { client } from \"@gradio/client\";\n\nconst app = await client(\"gradio/calculator\");\nconst result = await app.predict(\"/predict\", [4, \"add\", 5]);\n```\n\nFor certain inputs, such as images, you should pass in a `Buffer`, `Blob` or `File` depending on what is most convenient. In node, this would be a `Buffer` or `Blob`; in a browser environment, this would be a `Blob` or `File`.\n\n```js\nimport { client } from \"@gradio/client\";\n\nconst response = await fetch(\n \"https://audio-samples.github.io/samples/mp3/blizzard_unconditional/sample-0.mp3\"\n);\nconst audio_file = await response.blob();\n\nconst app = await client(\"abidlabs/whisper\");\nconst result = await client.predict(\"/predict\", [audio_file]);\n```\n\n## Using events\n\nIf the API you are working with can return results over time, or you wish to access information about the status of a job, you can use the event interface for more flexibility. This is especially useful for iterative endpoints or generator endpoints that will produce a series of values over time as discreet responses.\n\n```js\nimport { client } from \"@gradio/client\";\n\nfunction log_result(payload) {\n const {\n data: [translation],\n } = payload;\n\n console.log(`The translated result is: ${translation}`);\n}\n\nconst app = await client(\"abidlabs/en2fr\");\nconst job = app.submit(\"/predict\", [\"Hello\"]);\n\njob.on(\"data\", log_result);\n```\n\n## Status\n\nThe event interface also allows you to get the status of the running job by listening to the `\"status\"` event. This returns an object with the following attributes: `status` (a human readbale status of the current job, `\"pending\" | \"generating\" | \"complete\" | \"error\"`), `code` (the detailed gradio code for the job), `position` (the current position of this job in the queue), `queue_size` (the total queue size), `eta` (estimated time this job will complete), `success` (a boolean representing whether the job completed successfully), and `time` ( as `Date` object detailing the time that the status was generated).\n\n```js\nimport { client } from \"@gradio/client\";\n\nfunction log_status(status) {\n console.log(\n `The current status for this job is: ${JSON.stringify(status, null, 2)}.`\n );\n}\n\nconst app = await client(\"abidlabs/en2fr\");\nconst job = app.submit(\"/predict\", [\"Hello\"]);\n\njob.on(\"status\", log_status);\n```\n\n## Cancelling Jobs\n\nThe job instance also has a `.cancel()` method that cancels jobs that have been queued but not started. For example, if you run:\n\n```js\nimport { client } from \"@gradio/client\";\n\nconst app = await client(\"abidlabs/en2fr\");\nconst job_one = app.submit(\"/predict\", [\"Hello\"]);\nconst job_two = app.submit(\"/predict\", [\"Friends\"]);\n\njob_one.cancel();\njob_two.cancel();\n```\n\nIf the first job has started processing, then it will not be canceled but the client will no longer listen for updates (throwing away the job). 
If the second job has not yet started, it will be successfully canceled and removed from the queue.\n\n## Generator Endpoints\n\nSome Gradio API endpoints do not return a single value, rather they return a series of values. You can listen for these values in real time using the event interface:\n\n```js\nimport { client } from \"@gradio/client\";\n\nconst app = await client(\"gradio/count_generator\");\nconst job = app.submit(0, [9]);\n\njob.on(\"data\", (data) => console.log(data));\n```\n\nThis will log out the values as they are generated by the endpoint.\n\nYou can also cancel jobs that that have iterative outputs, in which case the job will finish immediately.\n\n```js\nimport { client } from \"@gradio/client\";\n\nconst app = await client(\"gradio/count_generator\");\nconst job = app.submit(0, [9]);\n\njob.on(\"data\", (data) => console.log(data));\n\nsetTimeout(() => {\n job.cancel();\n}, 3000);\n```\n", "html": "

Getting Started with the Gradio JavaScript client

\n\n

The Gradio JavaScript client makes it very easy to use any Gradio app as an API. As an example, consider this Hugging Face Space that transcribes audio files that are recorded from the microphone.

\n\n

\"\"

\n\n

Using the @gradio/client library, we can easily use the Gradio app as an API to transcribe audio files programmatically.

\n\n

Here's the entire code to do it:

\n\n
import { client } from \"@gradio/client\";\n\nconst response = await fetch(\n  \"https://github.com/audio-samples/audio-samples.github.io/raw/master/samples/wav/ted_speakers/SalmanKhan/sample-1.wav\"\n);\nconst audio_file = await response.blob();\n\nconst app = await client(\"abidlabs/whisper\");\nconst transcription = await app.predict(\"/predict\", [audio_file]);\n\nconsole.log(transcription.data);\n// [ \"I said the same phrase 30 times.\" ]\n
\n\n

The Gradio client works with any hosted Gradio app, whether it be an image generator, a text summarizer, a stateful chatbot, a tax calculator, or anything else! The Gradio Client is mostly used with apps hosted on Hugging Face Spaces, but your app can be hosted anywhere, such as your own server.

\n\n

Prerequisites: To use the Gradio client, you do not need to know the gradio library in great detail. However, it is helpful to have general familiarity with Gradio's concepts of input and output components.

\n\n

Installation

\n\n

The lightweight @gradio/client package can be installed from the npm registry with a package manager of your choice; it supports Node versions 18 and above:

\n\n
npm i @gradio/client\n
\n\n

Connecting to a running Gradio App

\n\n

Start by instantiating a client instance and connecting it to a Gradio app that is running on Hugging Face Spaces or anywhere else on the web.

\n\n

Connecting to a Hugging Face Space

\n\n
import { client } from \"@gradio/client\";\n\nconst app = await client(\"abidlabs/en2fr\"); // a Space that translates from English to French\n
\n\n

You can also connect to private Spaces by passing in your HF token with the hf_token property of the options parameter. You can get your HF token here: https://huggingface.co/settings/tokens

\n\n
import { client } from \"@gradio/client\";\n\nconst app = await client(\"abidlabs/my-private-space\", { hf_token: \"hf_...\" })\n
\n\n

Duplicating a Space for private use

\n\n

While you can use any public Space as an API, you may get rate limited by Hugging Face if you make too many requests. For unlimited usage of a Space, simply duplicate the Space to create a private Space, and then use it to make as many requests as you'd like!

\n\n

The @gradio/client exports another function, duplicate, to make this process simple (you'll need to pass in your Hugging Face token).

\n\n

duplicate is almost identical to client; the only difference is what happens under the hood:

\n\n
import { duplicate } from \"@gradio/client\";\n\nconst response = await fetch(\n  \"https://audio-samples.github.io/samples/mp3/blizzard_unconditional/sample-0.mp3\"\n);\nconst audio_file = await response.blob();\n\nconst app = await duplicate(\"abidlabs/whisper\", { hf_token: \"hf_...\" });\nconst transcription = await app.predict(\"/predict\", [audio_file]);\n
\n\n

If you have previously duplicated a Space, re-running duplicate will not create a new Space. Instead, the client will attach to the previously-created Space. So it is safe to re-run the duplicate method multiple times with the same space.

\n\n

Note: if the original Space uses GPUs, your private Space will as well, and your Hugging Face account will get billed based on the price of the GPU. To minimize charges, your Space will automatically go to sleep after 5 minutes of inactivity. You can also set the hardware using the hardware and timeout properties of duplicate's options object like this:

\n\n
import { duplicate } from \"@gradio/client\";\n\nconst app = await duplicate(\"abidlabs/whisper\", {\n  hf_token: \"hf_...\",\n  timeout: 60,\n  hardware: \"a10g-small\",\n});\n
\n\n

Connecting to a general Gradio app

\n\n

If your app is running somewhere else, just provide the full URL instead, including the \"http://\" or \"https://\". Here's an example of making predictions to a Gradio app that is running on a share URL:

\n\n
import { client } from \"@gradio/client\";\n\nconst app = await client(\"https://bec81a83-5b5c-471e.gradio.live\");\n
\n\n

Inspecting the API endpoints

\n\n

Once you have connected to a Gradio app, you can view the APIs that are available to you by calling the client's view_api method.

\n\n

For the Whisper Space, we can do this:

\n\n
import { client } from \"@gradio/client\";\n\nconst app = await client(\"abidlabs/whisper\");\n\nconst app_info = await app.view_api();\n\nconsole.log(app_info);\n
\n\n

And we will see the following:

\n\n
{\n  \"named_endpoints\": {\n    \"/predict\": {\n      \"parameters\": [\n        {\n          \"label\": \"text\",\n          \"component\": \"Textbox\",\n          \"type\": \"string\"\n        }\n      ],\n      \"returns\": [\n        {\n          \"label\": \"output\",\n          \"component\": \"Textbox\",\n          \"type\": \"string\"\n        }\n      ]\n    }\n  },\n  \"unnamed_endpoints\": {}\n}\n
\n\n

This shows us that we have 1 API endpoint in this Space, and shows us how to use the API endpoint to make a prediction: we should call the .predict() method (which we will explore below), providing a parameter input_audio of type string, which is a URL to a file.

\n\n

We should also provide the api_name='/predict' argument to the predict() method. Although this isn't necessary if a Gradio app has only 1 named endpoint, it does allow us to call different endpoints in a single app if they are available. If an app has unnamed API endpoints, these can also be displayed by running .view_api(all_endpoints=True).

\n\n

Making a prediction

\n\n

The simplest way to make a prediction is to call the .predict() method with the appropriate arguments:

\n\n
import { client } from \"@gradio/client\";\n\nconst app = await client(\"abidlabs/en2fr\");\nconst result = await app.predict(\"/predict\", [\"Hello\"]);\n
\n\n

If there are multiple parameters, then you should pass them as an array to .predict(), like this:

\n\n
import { client } from \"@gradio/client\";\n\nconst app = await client(\"gradio/calculator\");\nconst result = await app.predict(\"/predict\", [4, \"add\", 5]);\n
\n\n

For certain inputs, such as images, you should pass in a Buffer, Blob or File depending on what is most convenient. In node, this would be a Buffer or Blob; in a browser environment, this would be a Blob or File.

\n\n
import { client } from \"@gradio/client\";\n\nconst response = await fetch(\n  \"https://audio-samples.github.io/samples/mp3/blizzard_unconditional/sample-0.mp3\"\n);\nconst audio_file = await response.blob();\n\nconst app = await client(\"abidlabs/whisper\");\nconst result = await app.predict(\"/predict\", [audio_file]);\n
\n\n

Using events

\n\n

If the API you are working with can return results over time, or you wish to access information about the status of a job, you can use the event interface for more flexibility. This is especially useful for iterative endpoints or generator endpoints that will produce a series of values over time as discrete responses.

\n\n
import { client } from \"@gradio/client\";\n\nfunction log_result(payload) {\n  const {\n    data: [translation],\n  } = payload;\n\n  console.log(`The translated result is: ${translation}`);\n}\n\nconst app = await client(\"abidlabs/en2fr\");\nconst job = app.submit(\"/predict\", [\"Hello\"]);\n\njob.on(\"data\", log_result);\n
\n\n

Status

\n\n

The event interface also allows you to get the status of the running job by listening to the \"status\" event. This returns an object with the following attributes: status (a human readable status of the current job, \"pending\" | \"generating\" | \"complete\" | \"error\"), code (the detailed gradio code for the job), position (the current position of this job in the queue), queue_size (the total queue size), eta (estimated time this job will complete), success (a boolean representing whether the job completed successfully), and time (a Date object detailing the time that the status was generated).

\n\n
import { client } from \"@gradio/client\";\n\nfunction log_status(status) {\n  console.log(\n    `The current status for this job is: ${JSON.stringify(status, null, 2)}.`\n  );\n}\n\nconst app = await client(\"abidlabs/en2fr\");\nconst job = app.submit(\"/predict\", [\"Hello\"]);\n\njob.on(\"status\", log_status);\n
\n\n

Cancelling Jobs

\n\n

The job instance also has a .cancel() method that cancels jobs that have been queued but not started. For example, if you run:

\n\n
import { client } from \"@gradio/client\";\n\nconst app = await client(\"abidlabs/en2fr\");\nconst job_one = app.submit(\"/predict\", [\"Hello\"]);\nconst job_two = app.submit(\"/predict\", [\"Friends\"]);\n\njob_one.cancel();\njob_two.cancel();\n
\n\n

If the first job has started processing, then it will not be canceled but the client will no longer listen for updates (throwing away the job). If the second job has not yet started, it will be successfully canceled and removed from the queue.

\n\n

Generator Endpoints

\n\n

Some Gradio API endpoints do not return a single value; rather, they return a series of values. You can listen for these values in real time using the event interface:

\n\n
import { client } from \"@gradio/client\";\n\nconst app = await client(\"gradio/count_generator\");\nconst job = app.submit(0, [9]);\n\njob.on(\"data\", (data) => console.log(data));\n
\n\n

This will log out the values as they are generated by the endpoint.

\n\n

You can also cancel jobs that have iterative outputs, in which case the job will finish immediately.

\n\n
import { client } from \"@gradio/client\";\n\nconst app = await client(\"gradio/count_generator\");\nconst job = app.submit(0, [9]);\n\njob.on(\"data\", (data) => console.log(data));\n\nsetTimeout(() => {\n  job.cancel();\n}, 3000);\n
\n", "tags": ["CLIENT", "API", "SPACES"], "spaces": [], "url": "/guides/getting-started-with-the-js-client/", "contributor": null}, {"name": "fastapi-app-with-the-gradio-client", "category": "client-libraries", "pretty_category": "Client Libraries", "guide_index": null, "absolute_index": 31, "pretty_name": "Fastapi App With The Gradio Client", "content": "# Building a FastAPI App with the Gradio Python Client\n\n\n\nIn this blog post, we will demonstrate how to use the `gradio_client` [Python library](getting-started-with-the-python-client/), which enables developers to make requests to a Gradio app programmatically, by creating an example FastAPI web app. The web app we will be building is called \"Acapellify,\" and it will allow users to upload video files as input and return a version of that video without instrumental music. It will also display a gallery of generated videos.\n\n\n**Prerequisites**\n\nBefore we begin, make sure you are running Python 3.9 or later, and have the following libraries installed:\n\n* `gradio_client`\n* `fastapi`\n* `uvicorn`\n\nYou can install these libraries from `pip`: \n\n```bash\n$ pip install gradio_client fastapi uvicorn\n```\n\nYou will also need to have ffmpeg installed. You can check to see if you already have ffmpeg by running in your terminal:\n\n```bash\n$ ffmpeg version\n```\n\nOtherwise, install ffmpeg [by following these instructions](https://www.hostinger.com/tutorials/how-to-install-ffmpeg).\n\n## Step 1: Write the Video Processing Function\n\nLet's start with what seems like the most complex bit -- using machine learning to remove the music from a video. \n\nLuckily for us, there's an existing Space we can use to make this process easier: [https://huggingface.co/spaces/abidlabs/music-separation](https://huggingface.co/spaces/abidlabs/music-separation). This Space takes an audio file and produces two separate audio files: one with the instrumental music and one with all other sounds in the original clip. Perfect to use with our client! \n\nOpen a new Python file, say `main.py`, and start by importing the `Client` class from `gradio_client` and connecting it to this Space:\n\n```py\nfrom gradio_client import Client\n\nclient = Client(\"abidlabs/music-separation\")\n\ndef acapellify(audio_path):\n result = client.predict(audio_path, api_name=\"/predict\")\n return result[0]\n```\n\nThat's all the code that's needed -- notice that the API endpoints returns two audio files (one without the music, and one with just the music) in a list, and so we just return the first element of the list. \n\n---\n\n**Note**: since this is a public Space, there might be other users using this Space as well, which might result in a slow experience. You can duplicate this Space with your own [Hugging Face token](https://huggingface.co/settings/tokens) and create a private Space that only you have will have access to and bypass the queue. To do that, simply replace the first two lines above with: \n\n```py\nfrom gradio_client import Client\n\nclient = Client.duplicate(\"abidlabs/music-separation\", hf_token=YOUR_HF_TOKEN)\n```\n\nEverything else remains the same!\n\n---\n\nNow, of course, we are working with video files, so we first need to extract the audio from the video files. For this, we will be using the `ffmpeg` library, which does a lot of heavy lifting when it comes to working with audio and video files. 
The most common way to use `ffmpeg` is through the command line, which we'll call via Python's `subprocess` module:\n\nOur video processing workflow will consist of three steps: \n\n1. First, we start by taking in a video filepath and extracting the audio using `ffmpeg`. \n2. Then, we pass in the audio file through the `acapellify()` function above.\n3. Finally, we combine the new audio with the original video to produce a final acapellified video. \n\nHere's the complete code in Python, which you can add to your `main.py` file:\n\n```python\nimport subprocess\n\ndef process_video(video_path):\n old_audio = os.path.basename(video_path).split(\".\")[0] + \".m4a\"\n subprocess.run(['ffmpeg', '-y', '-i', video_path, '-vn', '-acodec', 'copy', old_audio])\n \n new_audio = acapellify(old_audio)\n \n new_video = f\"acap_{video_path}\"\n subprocess.call(['ffmpeg', '-y', '-i', video_path, '-i', new_audio, '-map', '0:v', '-map', '1:a', '-c:v', 'copy', '-c:a', 'aac', '-strict', 'experimental', f\"static/{new_video}\"])\n return new_video\n```\n\nYou can read up on [ffmpeg documentation](https://ffmpeg.org/ffmpeg.html) if you'd like to understand all of the command line parameters, as they are beyond the scope of this tutorial.\n\n## Step 2: Create a FastAPI app (Backend Routes)\n\nNext up, we'll create a simple FastAPI app. If you haven't used FastAPI before, check out [the great FastAPI docs](https://fastapi.tiangolo.com/). Otherwise, this basic template, which we add to `main.py`, will look pretty familiar:\n\n```python\nimport os\nfrom fastapi import FastAPI, File, UploadFile, Request\nfrom fastapi.responses import HTMLResponse, RedirectResponse\nfrom fastapi.staticfiles import StaticFiles\nfrom fastapi.templating import Jinja2Templates\n\napp = FastAPI()\nos.makedirs(\"static\", exist_ok=True)\napp.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\")\ntemplates = Jinja2Templates(directory=\"templates\")\n\nvideos = []\n\n@app.get(\"/\", response_class=HTMLResponse)\nasync def home(request: Request):\n return templates.TemplateResponse(\n \"home.html\", {\"request\": request, \"videos\": videos})\n\n@app.post(\"/uploadvideo/\")\nasync def upload_video(video: UploadFile = File(...)):\n new_video = process_video(video.filename)\n videos.append(new_video)\n return RedirectResponse(url='/', status_code=303)\n```\n\nIn this example, the FastAPI app has two routes: `/` and `/uploadvideo/`.\n\nThe `/` route returns an HTML template that displays a gallery of all uploaded videos. \n\nThe `/uploadvideo/` route accepts a `POST` request with an `UploadFile` object, which represents the uploaded video file. The video file is \"acapellified\" via the `process_video()` method, and the output video is stored in a list which stores all of the uploaded videos in memory.\n\nNote that this is a very basic example and if this were a production app, you will need to add more logic to handle file storage, user authentication, and security considerations. \n\n## Step 3: Create a FastAPI app (Frontend Template)\n\nFinally, we create the frontend of our web application. First, we create a folder called `templates` in the same directory as `main.py`. We then create a template, `home.html` inside the `templates` folder. 
Here is the resulting file structure:\n\n```csv\n\u251c\u2500\u2500 main.py\n\u251c\u2500\u2500 templates\n\u2502 \u2514\u2500\u2500 home.html\n```\n\nWrite the following as the contents of `home.html`:\n\n```html\n<!DOCTYPE html>\n<html>\n <head>\n <title>Video Gallery</title>\n <style>\n body {\n font-family: sans-serif;\n margin: 0;\n padding: 0;\n background-color: #f5f5f5;\n }\n h1 {\n text-align: center;\n margin-top: 30px;\n margin-bottom: 20px;\n }\n .gallery {\n display: flex;\n flex-wrap: wrap;\n justify-content: center;\n gap: 20px;\n padding: 20px;\n }\n .video {\n border: 2px solid #ccc;\n box-shadow: 0px 0px 10px rgba(0, 0, 0, 0.2);\n border-radius: 5px;\n overflow: hidden;\n width: 300px;\n margin-bottom: 20px;\n }\n .video video {\n width: 100%;\n height: 200px;\n }\n .video p {\n text-align: center;\n margin: 10px 0;\n }\n form {\n margin-top: 20px;\n text-align: center;\n }\n input[type=\"file\"] {\n display: none;\n }\n .upload-btn {\n display: inline-block;\n background-color: #3498db;\n color: #fff;\n padding: 10px 20px;\n font-size: 16px;\n border: none;\n border-radius: 5px;\n cursor: pointer;\n }\n .upload-btn:hover {\n background-color: #2980b9;\n }\n .file-name {\n margin-left: 10px;\n }\n </style>\n </head>\n <body>\n <h1>Video Gallery</h1>\n {% if videos %}\n <div class=\"gallery\">\n {% for video in videos %}\n <div class=\"video\">\n <video controls>\n <source src=\"{{ url_for('static', path=video) }}\" type=\"video/mp4\">\n Your browser does not support the video tag.\n </video>\n <p>{{ video }}</p>\n </div>\n {% endfor %}\n </div>\n {% else %}\n <p>No videos uploaded yet.</p>\n {% endif %}\n <form action=\"/uploadvideo/\" method=\"post\" enctype=\"multipart/form-data\">\n <label for=\"video-upload\" class=\"upload-btn\">Choose video file</label>\n <input type=\"file\" name=\"video\" id=\"video-upload\">\n <span class=\"file-name\"></span>\n <button type=\"submit\" class=\"upload-btn\">Upload</button>\n </form>\n <script>\n // Display selected file name in the form\n const fileUpload = document.getElementById(\"video-upload\");\n const fileName = document.querySelector(\".file-name\");\n\n fileUpload.addEventListener(\"change\", (e) => {\n fileName.textContent = e.target.files[0].name;\n });\n </script>\n </body>\n</html>\n```\n\n## Step 4: Run your FastAPI app\n\nFinally, we are ready to run our FastAPI app, powered by the Gradio Python Client!\n\nOpen up a terminal and navigate to the directory containing `main.py`. Then run the following command in the terminal:\n\n```bash\n$ uvicorn main:app\n```\n\nYou should see an output that looks like this:\n\n```csv\nLoaded as API: https://abidlabs-music-separation.hf.space \u2714\nINFO: Started server process [1360]\nINFO: Waiting for application startup.\nINFO: Application startup complete.\nINFO: Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit)\n```\n\nAnd that's it! Start uploading videos and you'll get some \"acapellified\" videos in response (might take seconds to minutes to process depending on the length of your videos). Here's how the UI looks after uploading two videos:\n\n![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/gradio-guides/acapellify.png)\n\n If you'd like to learn more about how to use the Gradio Python Client in your projects, [read the dedicated Guide](/guides/getting-started-with-the-python-client/).\n\n", "html": "

Building a FastAPI App with the Gradio Python Client

\n\n

In this blog post, we will demonstrate how to use the gradio_client Python library, which enables developers to make requests to a Gradio app programmatically, by creating an example FastAPI web app. The web app we will be building is called \"Acapellify,\" and it will allow users to upload video files as input and return a version of that video without instrumental music. It will also display a gallery of generated videos.

\n\n

Prerequisites

\n\n

Before we begin, make sure you are running Python 3.9 or later, and have the following libraries installed:

\n\n
  • gradio_client
  • fastapi
  • uvicorn
\n\n

You can install these libraries from pip:

\n\n
$ pip install gradio_client fastapi uvicorn\n
\n\n

You will also need to have ffmpeg installed. You can check to see if you already have ffmpeg by running in your terminal:

\n\n
$ ffmpeg -version\n
\n\n

Otherwise, install ffmpeg by following these instructions.

\n\n

Step 1: Write the Video Processing Function

\n\n

Let's start with what seems like the most complex bit -- using machine learning to remove the music from a video.

\n\n

Luckily for us, there's an existing Space we can use to make this process easier: https://huggingface.co/spaces/abidlabs/music-separation. This Space takes an audio file and produces two separate audio files: one with the instrumental music and one with all other sounds in the original clip. Perfect to use with our client!

\n\n

Open a new Python file, say main.py, and start by importing the Client class from gradio_client and connecting it to this Space:

\n\n
from gradio_client import Client\n\nclient = Client(\"abidlabs/music-separation\")\n\ndef acapellify(audio_path):\n    result = client.predict(audio_path, api_name=\"/predict\")\n    return result[0]\n
\n\n

That's all the code that's needed -- notice that the API endpoint returns two audio files (one without the music, and one with just the music) in a list, and so we just return the first element of the list.

\n\n
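
For instance (a minimal illustration that is not part of the original guide, using a hypothetical local file my_song.wav), calling the helper returns the path of the vocals-only track produced by the Space:

\n\n
# \"my_song.wav\" is a hypothetical local audio file used purely for illustration\nvocals_only = acapellify(\"my_song.wav\")\nprint(vocals_only)  # path to the audio with the instrumental music removed\n
\n\n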
\n\n

Note: since this is a public Space, there might be other users using this Space as well, which might result in a slow experience. You can duplicate this Space with your own Hugging Face token and create a private Space that only you will have access to, allowing you to bypass the queue. To do that, simply replace the first two lines above with:

\n\n
from gradio_client import Client\n\nclient = Client.duplicate(\"abidlabs/music-separation\", hf_token=YOUR_HF_TOKEN)\n
\n\n

Everything else remains the same!

\n\n
\n\n

Now, of course, we are working with video files, so we first need to extract the audio from the video files. For this, we will be using the ffmpeg library, which does a lot of heavy lifting when it comes to working with audio and video files. The most common way to use ffmpeg is through the command line, which we'll call via Python's subprocess module.

\n\n

Our video processing workflow will consist of three steps:

\n\n
  1. First, we take in a video filepath and extract the audio using ffmpeg.
  2. Then, we pass the audio file through the acapellify() function above.
  3. Finally, we combine the new audio with the original video to produce the final acapellified video.
\n\n

Here's the complete code in Python, which you can add to your main.py file:

\n\n
import os\nimport subprocess\n\ndef process_video(video_path):\n    # Extract the audio track from the original video\n    old_audio = os.path.basename(video_path).split(\".\")[0] + \".m4a\"\n    subprocess.run(['ffmpeg', '-y', '-i', video_path, '-vn', '-acodec', 'copy', old_audio])\n\n    # Remove the instrumental music via the Gradio Space\n    new_audio = acapellify(old_audio)\n\n    # Recombine the new audio with the original video\n    new_video = f\"acap_{video_path}\"\n    subprocess.call(['ffmpeg', '-y', '-i', video_path, '-i', new_audio, '-map', '0:v', '-map', '1:a', '-c:v', 'copy', '-c:a', 'aac', '-strict', 'experimental', f\"static/{new_video}\"])\n    return new_video\n
\n\n

You can read up on the ffmpeg documentation if you'd like to understand all of the command-line parameters, as they are beyond the scope of this tutorial.

\n\n

Step 2: Create a FastAPI app (Backend Routes)

\n\n

Next up, we'll create a simple FastAPI app. If you haven't used FastAPI before, check out the great FastAPI docs. Otherwise, this basic template, which we add to main.py, will look pretty familiar:

\n\n
import os\nfrom fastapi import FastAPI, File, UploadFile, Request\nfrom fastapi.responses import HTMLResponse, RedirectResponse\nfrom fastapi.staticfiles import StaticFiles\nfrom fastapi.templating import Jinja2Templates\n\napp = FastAPI()\nos.makedirs(\"static\", exist_ok=True)\napp.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\")\ntemplates = Jinja2Templates(directory=\"templates\")\n\nvideos = []\n\n@app.get(\"/\", response_class=HTMLResponse)\nasync def home(request: Request):\n    return templates.TemplateResponse(\n        \"home.html\", {\"request\": request, \"videos\": videos})\n\n@app.post(\"/uploadvideo/\")\nasync def upload_video(video: UploadFile = File(...)):\n    new_video = process_video(video.filename)\n    videos.append(new_video)\n    return RedirectResponse(url='/', status_code=303)\n
\n\n

In this example, the FastAPI app has two routes: / and /uploadvideo/.

\n\n

The / route returns an HTML template that displays a gallery of all uploaded videos.

\n\n

The /uploadvideo/ route accepts a POST request with an UploadFile object, which represents the uploaded video file. The video file is \"acapellified\" via the process_video() method, and the output video is stored in a list which stores all of the uploaded videos in memory.

\n\n

Note that this is a very basic example; if this were a production app, you would need to add more logic to handle file storage, user authentication, and security considerations.

\n\n
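
As one hedged sketch of the file-storage point above (not part of the original guide), you could replace the /uploadvideo/ route with a version that first writes the uploaded bytes to disk before handing the path to process_video:

\n\n
import shutil\n\n@app.post(\"/uploadvideo/\")\nasync def upload_video(video: UploadFile = File(...)):\n    # Persist the raw upload to a local file before processing\n    upload_path = video.filename\n    with open(upload_path, \"wb\") as f:\n        shutil.copyfileobj(video.file, f)\n    new_video = process_video(upload_path)\n    videos.append(new_video)\n    return RedirectResponse(url='/', status_code=303)\n
\n\n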

Step 3: Create a FastAPI app (Frontend Template)

\n\n

Finally, we create the frontend of our web application. First, we create a folder called templates in the same directory as main.py. We then create a template, home.html, inside the templates folder. Here is the resulting file structure:

\n\n
\u251c\u2500\u2500 main.py\n\u251c\u2500\u2500 templates\n\u2502   \u2514\u2500\u2500 home.html\n
\n\n

Write the following as the contents of home.html:

\n\n
<!DOCTYPE html>\n<html>\n  <head>\n    <title>Video Gallery</title>\n    <style>\n      body {\n        font-family: sans-serif;\n        margin: 0;\n        padding: 0;\n        background-color: #f5f5f5;\n      }\n      h1 {\n        text-align: center;\n        margin-top: 30px;\n        margin-bottom: 20px;\n      }\n      .gallery {\n        display: flex;\n        flex-wrap: wrap;\n        justify-content: center;\n        gap: 20px;\n        padding: 20px;\n      }\n      .video {\n        border: 2px solid #ccc;\n        box-shadow: 0px 0px 10px rgba(0, 0, 0, 0.2);\n        border-radius: 5px;\n        overflow: hidden;\n        width: 300px;\n        margin-bottom: 20px;\n      }\n      .video video {\n        width: 100%;\n        height: 200px;\n      }\n      .video p {\n        text-align: center;\n        margin: 10px 0;\n      }\n      form {\n        margin-top: 20px;\n        text-align: center;\n      }\n      input[type=\"file\"] {\n        display: none;\n      }\n      .upload-btn {\n        display: inline-block;\n        background-color: #3498db;\n        color: #fff;\n        padding: 10px 20px;\n        font-size: 16px;\n        border: none;\n        border-radius: 5px;\n        cursor: pointer;\n      }\n      .upload-btn:hover {\n        background-color: #2980b9;\n      }\n      .file-name {\n        margin-left: 10px;\n      }\n    </style>\n  </head>\n  <body>\n    <h1>Video Gallery</h1>\n    {% if videos %}\n      <div class=\"gallery\">\n        {% for video in videos %}\n          <div class=\"video\">\n            <video controls>\n              <source src=\"{{ url_for('static', path=video) }}\" type=\"video/mp4\">\n              Your browser does not support the video tag.\n            </video>\n            <p>{{ video }}</p>\n          </div>\n        {% endfor %}\n      </div>\n    {% else %}\n      <p>No videos uploaded yet.</p>\n    {% endif %}\n    <form action=\"/uploadvideo/\" method=\"post\" enctype=\"multipart/form-data\">\n      <label for=\"video-upload\" class=\"upload-btn\">Choose video file</label>\n      <input type=\"file\" name=\"video\" id=\"video-upload\">\n      <span class=\"file-name\"></span>\n      <button type=\"submit\" class=\"upload-btn\">Upload</button>\n    </form>\n    <script>\n      // Display selected file name in the form\n      const fileUpload = document.getElementById(\"video-upload\");\n      const fileName = document.querySelector(\".file-name\");\n\n      fileUpload.addEventListener(\"change\", (e) => {\n        fileName.textContent = e.target.files[0].name;\n      });\n    </script>\n  </body>\n</html>\n
\n\n

Step 4: Run your FastAPI app

\n\n

Finally, we are ready to run our FastAPI app, powered by the Gradio Python Client!

\n\n

Open up a terminal and navigate to the directory containing main.py. Then run the following command in the terminal:

\n\n
$ uvicorn main:app\n
\n\n

You should see an output that looks like this:

\n\n
Loaded as API: https://abidlabs-music-separation.hf.space \u2714\nINFO:     Started server process [1360]\nINFO:     Waiting for application startup.\nINFO:     Application startup complete.\nINFO:     Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit)\n
\n\n

And that's it! Start uploading videos and you'll get some \"acapellified\" videos in response (might take seconds to minutes to process depending on the length of your videos). Here's how the UI looks after uploading two videos:

\n\n

\"\"

\n\n

If you'd like to learn more about how to use the Gradio Python Client in your projects, read the dedicated Guide.

\n", "tags": ["CLIENT", "API", "WEB APP"], "spaces": [], "url": "/guides/fastapi-app-with-the-gradio-client/", "contributor": null}, {"name": "gradio-and-llm-agents", "category": "client-libraries", "pretty_category": "Client Libraries", "guide_index": null, "absolute_index": 32, "pretty_name": "Gradio And Llm Agents", "content": "# Gradio & LLM Agents \ud83e\udd1d\n\nLarge Language Models (LLMs) are very impressive but they can be made even more powerful if we could give them skills to accomplish specialized tasks.\n\nThe [gradio_tools](https://github.com/freddyaboulton/gradio-tools) library can turn any [Gradio](https://github.com/gradio-app/gradio) application into a [tool](https://python.langchain.com/en/latest/modules/agents/tools.html) that an [agent](https://docs.langchain.com/docs/components/agents/agent) can use to complete its task. For example, an LLM could use a Gradio tool to transcribe a voice recording it finds online and then summarize it for you. Or it could use a different Gradio tool to apply OCR to a document on your Google Drive and then answer questions about it.\n\nThis guide will show how you can use `gradio_tools` to grant your LLM Agent access to the cutting edge Gradio applications hosted in the world. Although `gradio_tools` are compatible with more than one agent framework, we will focus on [Langchain Agents](https://docs.langchain.com/docs/components/agents/) in this guide.\n\n## Some background\n\n### What are agents?\n\nA [LangChain agent](https://docs.langchain.com/docs/components/agents/agent) is a Large Language Model (LLM) that takes user input and reports an output based on using one of many tools at its disposal.\n\n### What is Gradio?\n[Gradio](https://github.com/gradio-app/gradio) is the defacto standard framework for building Machine Learning Web Applications and sharing them with the world - all with just python! \ud83d\udc0d\n\n## gradio_tools - An end-to-end example\n\nTo get started with `gradio_tools`, all you need to do is import and initialize your tools and pass them to the langchain agent!\n\nIn the following example, we import the `StableDiffusionPromptGeneratorTool` to create a good prompt for stable diffusion, the\n`StableDiffusionTool` to create an image with our improved prompt, the `ImageCaptioningTool` to caption the generated image, and\nthe `TextToVideoTool` to create a video from a prompt. \n\nWe then tell our agent to create an image of a dog riding a skateboard, but to please improve our prompt ahead of time. We also ask\nit to caption the generated image and create a video for it. 
The agent can decide which tool to use without us explicitly telling it.\n\n```python\nimport os\n\nif not os.getenv(\"OPENAI_API_KEY\"):\n raise ValueError(\"OPENAI_API_KEY must be set\")\n\nfrom langchain.agents import initialize_agent\nfrom langchain.llms import OpenAI\nfrom gradio_tools import (StableDiffusionTool, ImageCaptioningTool, StableDiffusionPromptGeneratorTool,\n TextToVideoTool)\n\nfrom langchain.memory import ConversationBufferMemory\n\nllm = OpenAI(temperature=0)\nmemory = ConversationBufferMemory(memory_key=\"chat_history\")\ntools = [StableDiffusionTool().langchain, ImageCaptioningTool().langchain,\n StableDiffusionPromptGeneratorTool().langchain, TextToVideoTool().langchain]\n\n\nagent = initialize_agent(tools, llm, memory=memory, agent=\"conversational-react-description\", verbose=True)\noutput = agent.run(input=(\"Please create a photo of a dog riding a skateboard \"\n \"but improve my prompt prior to using an image generator.\"\n \"Please caption the generated image and create a video for it using the improved prompt.\"))\n```\n\nYou'll note that we are using some pre-built tools that come with `gradio_tools`. Please see this [doc](https://github.com/freddyaboulton/gradio-tools#gradio-tools-gradio--llm-agents) for a complete list of the tools that come with `gradio_tools`.\nIf you would like to use a tool that's not currently in `gradio_tools`, it is very easy to add your own. That's what the next section will cover.\n\n## gradio_tools - creating your own tool\n\nThe core abstraction is the `GradioTool`, which lets you define a new tool for your LLM as long as you implement a standard interface:\n\n```python\nclass GradioTool(BaseTool):\n\n def __init__(self, name: str, description: str, src: str) -> None:\n\n @abstractmethod\n def create_job(self, query: str) -> Job:\n pass\n\n @abstractmethod\n def postprocess(self, output: Tuple[Any] | Any) -> str:\n pass\n```\nThe requirements are:\n1. The name for your tool\n2. The description for your tool. This is crucial! Agents decide which tool to use based on their description. Be precise and be sure to include example of what the input and the output of the tool should look like.\n3. The url or space id, e.g. `freddyaboulton/calculator`, of the Gradio application. Based on this value, `gradio_tool` will create a [gradio client](https://github.com/gradio-app/gradio/blob/main/client/python/README.md) instance to query the upstream application via API. Be sure to click the link and learn more about the gradio client library if you are not familiar with it.\n4. create_job - Given a string, this method should parse that string and return a job from the client. Most times, this is as simple as passing the string to the `submit` function of the client. More info on creating jobs [here](https://github.com/gradio-app/gradio/blob/main/client/python/README.md#making-a-prediction)\n5. postprocess - Given the result of the job, convert it to a string the LLM can display to the user.\n6. *Optional* - Some libraries, e.g. [MiniChain](https://github.com/srush/MiniChain/tree/main), may need some info about the underlying gradio input and output types used by the tool. By default, this will return gr.Textbox() but \nif you'd like to provide more accurate info, implement the `_block_input(self, gr)` and `_block_output(self, gr)` methods of the tool. The `gr` variable is the gradio module (the result of `import gradio as gr`). 
It will be\nautomatically imported by the `GradiTool` parent class and passed to the `_block_input` and `_block_output` methods.\n\nAnd that's it!\n\nOnce you have created your tool, open a pull request to the `gradio_tools` repo! We welcome all contributions.\n\n## Example tool - Stable Diffusion\n\nHere is the code for the StableDiffusion tool as an example:\n\n```python\nfrom gradio_tool import GradioTool\nimport os\n\nclass StableDiffusionTool(GradioTool):\n \"\"\"Tool for calling stable diffusion from llm\"\"\"\n\n def __init__(\n self,\n name=\"StableDiffusion\",\n description=(\n \"An image generator. Use this to generate images based on \"\n \"text input. Input should be a description of what the image should \"\n \"look like. The output will be a path to an image file.\"\n ),\n src=\"gradio-client-demos/stable-diffusion\",\n hf_token=None,\n ) -> None:\n super().__init__(name, description, src, hf_token)\n\n def create_job(self, query: str) -> Job:\n return self.client.submit(query, \"\", 9, fn_index=1)\n\n def postprocess(self, output: str) -> str:\n return [os.path.join(output, i) for i in os.listdir(output) if not i.endswith(\"json\")][0]\n\n def _block_input(self, gr) -> \"gr.components.Component\":\n return gr.Textbox()\n\n def _block_output(self, gr) -> \"gr.components.Component\":\n return gr.Image()\n```\n\nSome notes on this implementation:\n1. All instances of `GradioTool` have an attribute called `client` that is a pointed to the underlying [gradio client](https://github.com/gradio-app/gradio/tree/main/client/python#gradio_client-use-a-gradio-app-as-an-api----in-3-lines-of-python). That is what you should use\nin the `create_job` method.\n2. `create_job` just passes the query string to the `submit` function of the client with some other parameters hardcoded, i.e. the negative prompt string and the guidance scale. We could modify our tool to also accept these values from the input string in a subsequent version.\n3. The `postprocess` method simply returns the first image from the gallery of images created by the stable diffusion space. We use the `os` module to get the full path of the image.\n\n## Conclusion\n\nYou now know how to extend the abilities of your LLM with the 1000s of gradio spaces running in the wild!\nAgain, we welcome any contributions to the [gradio_tools](https://github.com/freddyaboulton/gradio-tools) library.\nWe're excited to see the tools you all build!\n\n", "html": "

Gradio & LLM Agents \ud83e\udd1d

\n\n

Large Language Models (LLMs) are very impressive but they can be made even more powerful if we could give them skills to accomplish specialized tasks.

\n\n

The gradio_tools library can turn any Gradio application into a tool that an agent can use to complete its task. For example, an LLM could use a Gradio tool to transcribe a voice recording it finds online and then summarize it for you. Or it could use a different Gradio tool to apply OCR to a document on your Google Drive and then answer questions about it.

\n\n

This guide will show how you can use gradio_tools to grant your LLM agent access to the many cutting-edge Gradio applications available on the web. Although gradio_tools is compatible with more than one agent framework, we will focus on LangChain agents in this guide.

\n\n

Some background

\n\n

What are agents?

\n\n

A LangChain agent is a Large Language Model (LLM) that takes user input and reports an output based on using one of many tools at its disposal.

\n\n

What is Gradio?

\n\n

Gradio is the de facto standard framework for building machine learning web applications and sharing them with the world - all with just Python! \ud83d\udc0d

\n\n

gradio_tools - An end-to-end example

\n\n

To get started with gradio_tools, all you need to do is import and initialize your tools and pass them to the langchain agent!

\n\n

In the following example, we import the StableDiffusionPromptGeneratorTool to create a good prompt for stable diffusion, the\nStableDiffusionTool to create an image with our improved prompt, the ImageCaptioningTool to caption the generated image, and\nthe TextToVideoTool to create a video from a prompt.

\n\n

We then tell our agent to create an image of a dog riding a skateboard, but to please improve our prompt ahead of time. We also ask\nit to caption the generated image and create a video for it. The agent can decide which tool to use without us explicitly telling it.

\n\n
import os\n\nif not os.getenv(\"OPENAI_API_KEY\"):\n    raise ValueError(\"OPENAI_API_KEY must be set\")\n\nfrom langchain.agents import initialize_agent\nfrom langchain.llms import OpenAI\nfrom gradio_tools import (StableDiffusionTool, ImageCaptioningTool, StableDiffusionPromptGeneratorTool,\n                          TextToVideoTool)\n\nfrom langchain.memory import ConversationBufferMemory\n\nllm = OpenAI(temperature=0)\nmemory = ConversationBufferMemory(memory_key=\"chat_history\")\ntools = [StableDiffusionTool().langchain, ImageCaptioningTool().langchain,\n         StableDiffusionPromptGeneratorTool().langchain, TextToVideoTool().langchain]\n\n\nagent = initialize_agent(tools, llm, memory=memory, agent=\"conversational-react-description\", verbose=True)\noutput = agent.run(input=(\"Please create a photo of a dog riding a skateboard \"\n                          \"but improve my prompt prior to using an image generator.\"\n                          \"Please caption the generated image and create a video for it using the improved prompt.\"))\n
\n\n

You'll note that we are using some pre-built tools that come with gradio_tools. Please see this doc for a complete list of the tools that come with gradio_tools.\nIf you would like to use a tool that's not currently in gradio_tools, it is very easy to add your own. That's what the next section will cover.

\n\n

gradio_tools - creating your own tool

\n\n

The core abstraction is the GradioTool, which lets you define a new tool for your LLM as long as you implement a standard interface:

\n\n
class GradioTool(BaseTool):\n\n    def __init__(self, name: str, description: str, src: str) -> None:\n\n    @abstractmethod\n    def create_job(self, query: str) -> Job:\n        pass\n\n    @abstractmethod\n    def postprocess(self, output: Tuple[Any] | Any) -> str:\n        pass\n
\n\n

The requirements are:\n1. The name for your tool\n2. The description for your tool. This is crucial! Agents decide which tool to use based on their description. Be precise and be sure to include an example of what the input and the output of the tool should look like.\n3. The URL or Space id, e.g. freddyaboulton/calculator, of the Gradio application. Based on this value, gradio_tools will create a gradio client instance to query the upstream application via API. Be sure to click the link and learn more about the gradio client library if you are not familiar with it.\n4. create_job - Given a string, this method should parse that string and return a job from the client. Most times, this is as simple as passing the string to the submit function of the client. More info on creating jobs here.\n5. postprocess - Given the result of the job, convert it to a string the LLM can display to the user.\n6. Optional - Some libraries, e.g. MiniChain, may need some info about the underlying gradio input and output types used by the tool. By default, this will return gr.Textbox() but \nif you'd like to provide more accurate info, implement the _block_input(self, gr) and _block_output(self, gr) methods of the tool. The gr variable is the gradio module (the result of import gradio as gr). It will be\nautomatically imported by the GradioTool parent class and passed to the _block_input and _block_output methods.

\n\n

And that's it!

\n\n

Once you have created your tool, open a pull request to the gradio_tools repo! We welcome all contributions.

\n\n

Example tool - Stable Diffusion

\n\n

Here is the code for the StableDiffusion tool as an example:

\n\n
from gradio_tools import GradioTool\nimport os\n\nclass StableDiffusionTool(GradioTool):\n    \"\"\"Tool for calling stable diffusion from llm\"\"\"\n\n    def __init__(\n        self,\n        name=\"StableDiffusion\",\n        description=(\n            \"An image generator. Use this to generate images based on \"\n            \"text input. Input should be a description of what the image should \"\n            \"look like. The output will be a path to an image file.\"\n        ),\n        src=\"gradio-client-demos/stable-diffusion\",\n        hf_token=None,\n    ) -> None:\n        super().__init__(name, description, src, hf_token)\n\n    def create_job(self, query: str) -> Job:\n        return self.client.submit(query, \"\", 9, fn_index=1)\n\n    def postprocess(self, output: str) -> str:\n        return [os.path.join(output, i) for i in os.listdir(output) if not i.endswith(\"json\")][0]\n\n    def _block_input(self, gr) -> \"gr.components.Component\":\n        return gr.Textbox()\n\n    def _block_output(self, gr) -> \"gr.components.Component\":\n        return gr.Image()\n
\n\n

Some notes on this implementation:\n1. All instances of GradioTool have an attribute called client that is a pointer to the underlying gradio client. That is what you should use\nin the create_job method.\n2. create_job just passes the query string to the submit function of the client with some other parameters hardcoded, i.e. the negative prompt string and the guidance scale. We could modify our tool to also accept these values from the input string in a subsequent version.\n3. The postprocess method simply returns the first image from the gallery of images created by the stable diffusion space. We use the os module to get the full path of the image.

\n\n
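
To round this out, here is a minimal usage sketch (not from the original guide) showing how a tool defined this way plugs into a LangChain agent, reusing the same initialize_agent pattern as the end-to-end example earlier in this guide:

\n\n
from langchain.agents import initialize_agent\nfrom langchain.llms import OpenAI\nfrom langchain.memory import ConversationBufferMemory\n\nllm = OpenAI(temperature=0)\nmemory = ConversationBufferMemory(memory_key=\"chat_history\")\n\n# .langchain exposes the LangChain-compatible wrapper around the Gradio tool\ntools = [StableDiffusionTool().langchain]\n\nagent = initialize_agent(tools, llm, memory=memory, agent=\"conversational-react-description\", verbose=True)\nprint(agent.run(\"Please create a photo of a dog riding a skateboard.\"))\n
\n\n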

Conclusion

\n\n

You now know how to extend the abilities of your LLM with the 1000s of gradio spaces running in the wild!\nAgain, we welcome any contributions to the gradio_tools library.\nWe're excited to see the tools you all build!

\n", "tags": [], "spaces": [], "url": "/guides/gradio-and-llm-agents/", "contributor": null}]}, {"category": "Other Tutorials", "guides": [{"name": "building-a-pictionary-app", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 33, "pretty_name": "Building A Pictionary App", "content": "# Building a Pictionary App\n\n\n\n\n## Introduction\n\nHow well can an algorithm guess what you're drawing? A few years ago, Google released the **Quick Draw** dataset, which contains drawings made by humans of a variety of every objects. Researchers have used this dataset to train models to guess Pictionary-style drawings. \n\nSuch models are perfect to use with Gradio's *sketchpad* input, so in this tutorial we will build a Pictionary web application using Gradio. We will be able to build the whole web application in Python, and will look like this (try drawing something!):\n\n\n\nLet's get started! This guide covers how to build a pictionary app (step-by-step): \n\n1. [Set up the Sketch Recognition Model](#1-set-up-the-sketch-recognition-model)\n2. [Define a `predict` function](#2-define-a-predict-function)\n3. [Create a Gradio Interface](#3-create-a-gradio-interface)\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). To use the pretrained sketchpad model, also install `torch`.\n\n## 1. Set up the Sketch Recognition Model\n\nFirst, you will need a sketch recognition model. Since many researchers have already trained their own models on the Quick Draw dataset, we will use a pretrained model in this tutorial. Our model is a light 1.5 MB model trained by Nate Raw, that [you can download here](https://huggingface.co/spaces/nateraw/quickdraw/blob/main/pytorch_model.bin). \n\nIf you are interested, here [is the code](https://github.com/nateraw/quickdraw-pytorch) that was used to train the model. We will simply load the pretrained model in PyTorch, as follows:\n\n```python\nimport torch\nfrom torch import nn\n\nmodel = nn.Sequential(\n nn.Conv2d(1, 32, 3, padding='same'),\n nn.ReLU(),\n nn.MaxPool2d(2),\n nn.Conv2d(32, 64, 3, padding='same'),\n nn.ReLU(),\n nn.MaxPool2d(2),\n nn.Conv2d(64, 128, 3, padding='same'),\n nn.ReLU(),\n nn.MaxPool2d(2),\n nn.Flatten(),\n nn.Linear(1152, 256),\n nn.ReLU(),\n nn.Linear(256, len(LABELS)),\n)\nstate_dict = torch.load('pytorch_model.bin', map_location='cpu')\nmodel.load_state_dict(state_dict, strict=False)\nmodel.eval()\n```\n\n## 2. Define a `predict` function\n\nNext, you will need to define a function that takes in the *user input*, which in this case is a sketched image, and returns the prediction. The prediction should be returned as a dictionary whose keys are class name and values are confidence probabilities. We will load the class names from this [text file](https://huggingface.co/spaces/nateraw/quickdraw/blob/main/class_names.txt).\n\nIn the case of our pretrained model, it will look like this:\n\n```python\nfrom pathlib import Path\n\nLABELS = Path('class_names.txt').read_text().splitlines()\n\ndef predict(img):\n x = torch.tensor(img, dtype=torch.float32).unsqueeze(0).unsqueeze(0) / 255.\n with torch.no_grad():\n out = model(x)\n probabilities = torch.nn.functional.softmax(out[0], dim=0)\n values, indices = torch.topk(probabilities, 5)\n confidences = {LABELS[i]: v.item() for i, v in zip(indices, values)}\n return confidences\n```\n\nLet's break this down. 
The function takes one parameters:\n\n* `img`: the input image as a `numpy` array\n\nThen, the function converts the image to a PyTorch `tensor`, passes it through the model, and returns:\n\n* `confidences`: the top five predictions, as a dictionary whose keys are class labels and whose values are confidence probabilities\n\n## 3. Create a Gradio Interface\n\nNow that we have our predictive function set up, we can create a Gradio Interface around it. \n\nIn this case, the input component is a sketchpad. To create a sketchpad input, we can use the convenient string shortcut, `\"sketchpad\"` which creates a canvas for a user to draw on and handles the preprocessing to convert that to a numpy array. \n\nThe output component will be a `\"label\"`, which displays the top labels in a nice form.\n\nFinally, we'll add one more parameter, setting `live=True`, which allows our interface to run in real time, adjusting its predictions every time a user draws on the sketchpad. The code for Gradio looks like this:\n\n```python\nimport gradio as gr\n\ngr.Interface(fn=predict, \n inputs=\"sketchpad\",\n outputs=\"label\",\n live=True).launch()\n```\n\nThis produces the following interface, which you can try right here in your browser (try drawing something, like a \"snake\" or a \"laptop\"):\n\n\n\n----------\n\nAnd you're done! That's all the code you need to build a Pictionary-style guessing app. Have fun and try to find some edge cases \ud83e\uddd0\n\n", "html": "

Building a Pictionary App

\n\n

Introduction

\n\n

How well can an algorithm guess what you're drawing? A few years ago, Google released the Quick Draw dataset, which contains drawings made by humans of a variety of everyday objects. Researchers have used this dataset to train models to guess Pictionary-style drawings.

\n\n

Such models are perfect to use with Gradio's sketchpad input, so in this tutorial we will build a Pictionary web application using Gradio. We will be able to build the whole web application in Python, and it will look like this (try drawing something!):

\n\n\n\n

Let's get started! This guide covers how to build a pictionary app (step-by-step):

\n\n
  1. Set up the Sketch Recognition Model
  2. Define a predict function
  3. Create a Gradio Interface
\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed. To use the pretrained sketchpad model, also install torch.

\n\n

1. Set up the Sketch Recognition Model

\n\n

First, you will need a sketch recognition model. Since many researchers have already trained their own models on the Quick Draw dataset, we will use a pretrained model in this tutorial. Our model is a lightweight 1.5 MB model trained by Nate Raw, which you can download here.

\n\n

If you are interested, here is the code that was used to train the model. We will simply load the pretrained model in PyTorch, as follows:

\n\n
import torch\nfrom torch import nn\n\nmodel = nn.Sequential(\n    nn.Conv2d(1, 32, 3, padding='same'),\n    nn.ReLU(),\n    nn.MaxPool2d(2),\n    nn.Conv2d(32, 64, 3, padding='same'),\n    nn.ReLU(),\n    nn.MaxPool2d(2),\n    nn.Conv2d(64, 128, 3, padding='same'),\n    nn.ReLU(),\n    nn.MaxPool2d(2),\n    nn.Flatten(),\n    nn.Linear(1152, 256),\n    nn.ReLU(),\n    nn.Linear(256, len(LABELS)),\n)\nstate_dict = torch.load('pytorch_model.bin',    map_location='cpu')\nmodel.load_state_dict(state_dict, strict=False)\nmodel.eval()\n
\n\n

2. Define a predict function

\n\n

Next, you will need to define a function that takes in the user input, which in this case is a sketched image, and returns the prediction. The prediction should be returned as a dictionary whose keys are class name and values are confidence probabilities. We will load the class names from this text file.

\n\n

In the case of our pretrained model, it will look like this:

\n\n
from pathlib import Path\n\nLABELS = Path('class_names.txt').read_text().splitlines()\n\ndef predict(img):\n    x = torch.tensor(img, dtype=torch.float32).unsqueeze(0).unsqueeze(0) / 255.\n    with torch.no_grad():\n        out = model(x)\n    probabilities = torch.nn.functional.softmax(out[0], dim=0)\n    values, indices = torch.topk(probabilities, 5)\n    confidences = {LABELS[i]: v.item() for i, v in zip(indices, values)}\n    return confidences\n
\n\n

Let's break this down. The function takes one parameter:

\n\n
  • img: the input image as a numpy array
\n\n

Then, the function converts the image to a PyTorch tensor, passes it through the model, and returns:

\n\n
  • confidences: the top five predictions, as a dictionary whose keys are class labels and whose values are confidence probabilities
\n\n

3. Create a Gradio Interface

\n\n

Now that we have our predictive function set up, we can create a Gradio Interface around it.

\n\n

In this case, the input component is a sketchpad. To create a sketchpad input, we can use the convenient string shortcut, \"sketchpad\" which creates a canvas for a user to draw on and handles the preprocessing to convert that to a numpy array.

\n\n

The output component will be a \"label\", which displays the top labels in a nice form.

\n\n

Finally, we'll add one more parameter, setting live=True, which allows our interface to run in real time, adjusting its predictions every time a user draws on the sketchpad. The code for Gradio looks like this:

\n\n
import gradio as gr\n\ngr.Interface(fn=predict, \n             inputs=\"sketchpad\",\n             outputs=\"label\",\n             live=True).launch()\n
\n\n

This produces the following interface, which you can try right here in your browser (try drawing something, like a \"snake\" or a \"laptop\"):

\n\n\n\n
\n\n

And you're done! That's all the code you need to build a Pictionary-style guessing app. Have fun and try to find some edge cases \ud83e\uddd0

\n", "tags": ["SKETCHPAD", "LABELS", "LIVE"], "spaces": ["https://huggingface.co/spaces/nateraw/quickdraw"], "url": "/guides/building-a-pictionary-app/", "contributor": null}, {"name": "create-your-own-friends-with-a-gan", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 34, "pretty_name": "Create Your Own Friends With A Gan", "content": "# Create Your Own Friends with a GAN\n\n\n\n\n\n\n\n## Introduction\n\nIt seems that cryptocurrencies, [NFTs](https://www.nytimes.com/interactive/2022/03/18/technology/nft-guide.html), and the web3 movement are all the rage these days! Digital assets are being listed on marketplaces for astounding amounts of money, and just about every celebrity is debuting their own NFT collection. While your crypto assets [may be taxable, such as in Canada](https://www.canada.ca/en/revenue-agency/programs/about-canada-revenue-agency-cra/compliance/digital-currency/cryptocurrency-guide.html), today we'll explore some fun and tax-free ways to generate your own assortment of procedurally generated [CryptoPunks](https://www.larvalabs.com/cryptopunks).\n\nGenerative Adversarial Networks, often known just as *GANs*, are a specific class of deep-learning models that are designed to learn from an input dataset to create (*generate!*) new material that is convincingly similar to elements of the original training set. Famously, the website [thispersondoesnotexist.com](https://thispersondoesnotexist.com/) went viral with lifelike, yet synthetic, images of people generated with a model called StyleGAN2. GANs have gained traction in the machine learning world, and are now being used to generate all sorts of images, text, and even [music](https://salu133445.github.io/musegan/)!\n\nToday we'll briefly look at the high-level intuition behind GANs, and then we'll build a small demo around a pre-trained GAN to see what all the fuss is about. Here's a peek at what we're going to be putting together:\n\n\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). To use the pretrained model, also install `torch` and `torchvision`.\n\n## GANs: a very brief introduction\n\nOriginally proposed in [Goodfellow et al. 2014](https://arxiv.org/abs/1406.2661), GANs are made up of neural networks which compete with the intention of outsmarting each other. One network, known as the *generator*, is responsible for generating images. The other network, the *discriminator*, receives an image at a time from the generator along with a **real** image from the training data set. The discriminator then has to guess: which image is the fake?\n\nThe generator is constantly training to create images which are trickier for the discriminator to identify, while the discriminator raises the bar for the generator every time it correctly detects a fake. As the networks engage in this competitive (*adversarial!*) relationship, the images that get generated improve to the point where they become indistinguishable to human eyes!\n\nFor a more in-depth look at GANs, you can take a look at [this excellent post on Analytics Vidhya](https://www.analyticsvidhya.com/blog/2021/06/a-detailed-explanation-of-gan-with-implementation-using-tensorflow-and-keras/) or this [PyTorch tutorial](https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html). For now, though, we'll dive into a demo!\n\n## Step 1 \u2014 Create the Generator model\n\nTo generate new images with a GAN, you only need the generator model. 
There are many different architectures that the generator could use, but for this demo we'll use a pretrained GAN generator model with the following architecture:\n\n```python\nfrom torch import nn\n\nclass Generator(nn.Module):\n # Refer to the link below for explanations about nc, nz, and ngf\n # https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html#inputs\n def __init__(self, nc=4, nz=100, ngf=64):\n super(Generator, self).__init__()\n self.network = nn.Sequential(\n nn.ConvTranspose2d(nz, ngf * 4, 3, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 0, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n nn.Tanh(),\n )\n\n def forward(self, input):\n output = self.network(input)\n return output\n```\n\nWe're taking the generator from [this repo by @teddykoker](https://github.com/teddykoker/cryptopunks-gan/blob/main/train.py#L90), where you can also see the original discriminator model structure.\n\nAfter instantiating the model, we'll load in the weights from the Hugging Face Hub, stored at [nateraw/cryptopunks-gan](https://huggingface.co/nateraw/cryptopunks-gan):\n\n```python\nfrom huggingface_hub import hf_hub_download\nimport torch\n\nmodel = Generator()\nweights_path = hf_hub_download('nateraw/cryptopunks-gan', 'generator.pth')\nmodel.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))) # Use 'cuda' if you have a GPU available\n```\n\n## Step 2 \u2014 Defining a `predict` function\n\nThe `predict` function is the key to making Gradio work! Whatever inputs we choose through the Gradio interface will get passed through our `predict` function, which should operate on the inputs and generate outputs that we can display with Gradio output components. For GANs it's common to pass random noise into our model as the input, so we'll generate a tensor of random numbers and pass that through the model. We can then use `torchvision`'s `save_image` function to save the output of the model as a `png` file, and return the file name:\n\n```python\nfrom torchvision.utils import save_image\n\ndef predict(seed):\n num_punks = 4\n torch.manual_seed(seed)\n z = torch.randn(num_punks, 100, 1, 1)\n punks = model(z)\n save_image(punks, \"punks.png\", normalize=True)\n return 'punks.png'\n```\n\nWe're giving our `predict` function a `seed` parameter, so that we can fix the random tensor generation with a seed. We'll then be able to reproduce punks if we want to see them again by passing in the same seed.\n\n*Note!* Our model needs an input tensor of dimensions 100x1x1 to do a single inference, or (BatchSize)x100x1x1 for generating a batch of images. In this demo we'll start by generating 4 punks at a time.\n\n## Step 3 \u2014 Creating a Gradio interface\n\nAt this point you can even run the code you have with `predict()`, and you'll find your freshly generated punks in your file system at `./punks.png`. To make a truly interactive demo, though, we'll build out a simple interface with Gradio. 
Our goals here are to:\n\n* Set a slider input so users can choose the \"seed\" value\n* Use an image component for our output to showcase the generated punks\n* Use our `predict()` to take the seed and generate the images\n\nWith `gr.Interface()`, we can define all of that with a single function call:\n\n```python\nimport gradio as gr\n\ngr.Interface(\n predict,\n inputs=[\n gr.Slider(0, 1000, label='Seed', default=42),\n ],\n outputs=\"image\",\n).launch()\n```\n\nLaunching the interface should present you with something like this:\n\n\n\n## Step 4 \u2014 Even more punks!\n\nGenerating 4 punks at a time is a good start, but maybe we'd like to control how many we want to make each time. Adding more inputs to our Gradio interface is as simple as adding another item to the `inputs` list that we pass to `gr.Interface`:\n\n```python\ngr.Interface(\n predict,\n inputs=[\n gr.Slider(0, 1000, label='Seed', default=42),\n gr.Slider(4, 64, label='Number of Punks', step=1, default=10), # Adding another slider!\n ],\n outputs=\"image\",\n).launch()\n```\n\nThe new input will be passed to our `predict()` function, so we have to make some changes to that function to accept a new parameter:\n\n```python\ndef predict(seed, num_punks):\n torch.manual_seed(seed)\n z = torch.randn(num_punks, 100, 1, 1)\n punks = model(z)\n save_image(punks, \"punks.png\", normalize=True)\n return 'punks.png'\n```\n\nWhen you relaunch your interface, you should see a second slider that'll let you control the number of punks!\n\n## Step 5 - Polishing it up\n\nYour Gradio app is pretty much good to go, but you can add a few extra things to really make it ready for the spotlight \u2728\n\nWe can add some examples that users can easily try out by adding this to the `gr.Interface`:\n\n```python\ngr.Interface(\n # ...\n # keep everything as it is, and then add\n examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],\n).launch(cache_examples=True) # cache_examples is optional\n```\n\nThe `examples` parameter takes a list of lists, where each item in the sublists is ordered in the same order that we've listed the `inputs`. So in our case, `[seed, num_punks]`. Give it a try!\n\nYou can also try adding a `title`, `description`, and `article` to the `gr.Interface`. 
Each of those parameters accepts a string, so try it out and see what happens \ud83d\udc40 `article` will also accept HTML, as [explored in a previous guide](/guides/key-features/#descriptive-content)!\n\nWhen you're all done, you may end up with something like this:\n\n\n\nFor reference, here is our full code:\n\n```python\nimport torch\nfrom torch import nn\nfrom huggingface_hub import hf_hub_download\nfrom torchvision.utils import save_image\nimport gradio as gr\n\nclass Generator(nn.Module):\n # Refer to the link below for explanations about nc, nz, and ngf\n # https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html#inputs\n def __init__(self, nc=4, nz=100, ngf=64):\n super(Generator, self).__init__()\n self.network = nn.Sequential(\n nn.ConvTranspose2d(nz, ngf * 4, 3, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 0, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n nn.Tanh(),\n )\n\n def forward(self, input):\n output = self.network(input)\n return output\n\nmodel = Generator()\nweights_path = hf_hub_download('nateraw/cryptopunks-gan', 'generator.pth')\nmodel.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))) # Use 'cuda' if you have a GPU available\n\ndef predict(seed, num_punks):\n torch.manual_seed(seed)\n z = torch.randn(num_punks, 100, 1, 1)\n punks = model(z)\n save_image(punks, \"punks.png\", normalize=True)\n return 'punks.png'\n\ngr.Interface(\n predict,\n inputs=[\n gr.Slider(0, 1000, label='Seed', default=42),\n gr.Slider(4, 64, label='Number of Punks', step=1, default=10),\n ],\n outputs=\"image\",\n examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],\n).launch(cache_examples=True)\n```\n----------\n\nCongratulations! You've built out your very own GAN-powered CryptoPunks generator, with a fancy Gradio interface that makes it easy for anyone to use. Now you can [scour the Hub for more GANs](https://huggingface.co/models?other=gan) (or train your own) and continue making even more awesome demos \ud83e\udd17", "html": "

Create Your Own Friends with a GAN

\n\n

Introduction

\n\n

It seems that cryptocurrencies, NFTs, and the web3 movement are all the rage these days! Digital assets are being listed on marketplaces for astounding amounts of money, and just about every celebrity is debuting their own NFT collection. While your crypto assets may be taxable, such as in Canada, today we'll explore some fun and tax-free ways to generate your own assortment of procedurally generated CryptoPunks.

\n\n

Generative Adversarial Networks, often known just as GANs, are a specific class of deep-learning models that are designed to learn from an input dataset to create (generate!) new material that is convincingly similar to elements of the original training set. Famously, the website thispersondoesnotexist.com went viral with lifelike, yet synthetic, images of people generated with a model called StyleGAN2. GANs have gained traction in the machine learning world, and are now being used to generate all sorts of images, text, and even music!

\n\n

Today we'll briefly look at the high-level intuition behind GANs, and then we'll build a small demo around a pre-trained GAN to see what all the fuss is about. Here's a peek at what we're going to be putting together:

\n\n\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed. To use the pretrained model, also install torch and torchvision.

\n\n

GANs: a very brief introduction

\n\n

Originally proposed in Goodfellow et al. 2014, GANs are made up of neural networks which compete with the intention of outsmarting each other. One network, known as the generator, is responsible for generating images. The other network, the discriminator, receives an image at a time from the generator along with a real image from the training data set. The discriminator then has to guess: which image is the fake?

\n\n

The generator is constantly training to create images which are trickier for the discriminator to identify, while the discriminator raises the bar for the generator every time it correctly detects a fake. As the networks engage in this competitive (adversarial!) relationship, the images that get generated improve to the point where they become indistinguishable to human eyes!

\n\n
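To make this adversarial loop concrete, here is a deliberately tiny, self-contained sketch of a single training step. The networks, sizes, and data below are toy stand-ins invented for illustration, not the actual CryptoPunks training code (see the PyTorch tutorial linked below for a full DCGAN loop):

```python
import torch
from torch import nn

# Toy stand-ins for the two competing networks (sizes are arbitrary).
generator = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 4))
discriminator = nn.Sequential(nn.Linear(4, 32), nn.ReLU(), nn.Linear(32, 1), nn.Sigmoid())

g_opt = torch.optim.Adam(generator.parameters(), lr=2e-4)
d_opt = torch.optim.Adam(discriminator.parameters(), lr=2e-4)
loss_fn = nn.BCELoss()

real_batch = torch.randn(8, 4)                     # stand-in for a batch of real samples
ones, zeros = torch.ones(8, 1), torch.zeros(8, 1)  # "real" and "fake" targets

# Discriminator step: learn to label real samples 1 and generated samples 0.
fake_batch = generator(torch.randn(8, 16)).detach()
d_loss = loss_fn(discriminator(real_batch), ones) + loss_fn(discriminator(fake_batch), zeros)
d_opt.zero_grad()
d_loss.backward()
d_opt.step()

# Generator step: learn to make the discriminator label generated samples as real.
fake_batch = generator(torch.randn(8, 16))
g_loss = loss_fn(discriminator(fake_batch), ones)
g_opt.zero_grad()
g_loss.backward()
g_opt.step()
```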

For a more in-depth look at GANs, you can take a look at this excellent post on Analytics Vidhya or this PyTorch tutorial. For now, though, we'll dive into a demo!

\n\n

Step 1 \u2014 Create the Generator model

\n\n

To generate new images with a GAN, you only need the generator model. There are many different architectures that the generator could use, but for this demo we'll use a pretrained GAN generator model with the following architecture:

\n\n
from torch import nn\n\nclass Generator(nn.Module):\n    # Refer to the link below for explanations about nc, nz, and ngf\n    # https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html#inputs\n    def __init__(self, nc=4, nz=100, ngf=64):\n        super(Generator, self).__init__()\n        self.network = nn.Sequential(\n            nn.ConvTranspose2d(nz, ngf * 4, 3, 1, 0, bias=False),\n            nn.BatchNorm2d(ngf * 4),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False),\n            nn.BatchNorm2d(ngf * 2),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 0, bias=False),\n            nn.BatchNorm2d(ngf),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n            nn.Tanh(),\n        )\n\n    def forward(self, input):\n        output = self.network(input)\n        return output\n
\n\n

We're taking the generator from this repo by @teddykoker, where you can also see the original discriminator model structure.

\n\n

After instantiating the model, we'll load in the weights from the Hugging Face Hub, stored at nateraw/cryptopunks-gan:

\n\n
from huggingface_hub import hf_hub_download\nimport torch\n\nmodel = Generator()\nweights_path = hf_hub_download('nateraw/cryptopunks-gan', 'generator.pth')\nmodel.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))) # Use 'cuda' if you have a GPU available\n
\n\n

Step 2 \u2014 Defining a predict function

\n\n

The predict function is the key to making Gradio work! Whatever inputs we choose through the Gradio interface will get passed through our predict function, which should operate on the inputs and generate outputs that we can display with Gradio output components. For GANs it's common to pass random noise into our model as the input, so we'll generate a tensor of random numbers and pass that through the model. We can then use torchvision's save_image function to save the output of the model as a png file, and return the file name:

\n\n
from torchvision.utils import save_image\n\ndef predict(seed):\n    num_punks = 4\n    torch.manual_seed(seed)\n    z = torch.randn(num_punks, 100, 1, 1)\n    punks = model(z)\n    save_image(punks, \"punks.png\", normalize=True)\n    return 'punks.png'\n
\n\n

We're giving our predict function a seed parameter, so that we can fix the random tensor generation with a seed. We'll then be able to reproduce punks if we want to see them again by passing in the same seed.

\n\n

Note! Our model needs an input tensor of dimensions 100x1x1 to do a single inference, or (BatchSize)x100x1x1 for generating a batch of images. In this demo we'll start by generating 4 punks at a time.

\n\n

Step 3 \u2014 Creating a Gradio interface

\n\n

At this point you can even run the code you have with predict(<SOME_NUMBER>), and you'll find your freshly generated punks in your file system at ./punks.png. To make a truly interactive demo, though, we'll build out a simple interface with Gradio. Our goals here are to:

\n\n
  • Set a slider input so users can choose the \"seed\" value
  • Use an image component for our output to showcase the generated punks
  • Use our predict() to take the seed and generate the images
\n\n

With gr.Interface(), we can define all of that with a single function call:

\n\n
import gradio as gr\n\ngr.Interface(\n    predict,\n    inputs=[\n        gr.Slider(0, 1000, label='Seed', default=42),\n    ],\n    outputs=\"image\",\n).launch()\n
\n\n

Launching the interface should present you with something like this:

\n\n\n\n

Step 4 \u2014 Even more punks!

\n\n

Generating 4 punks at a time is a good start, but maybe we'd like to control how many we want to make each time. Adding more inputs to our Gradio interface is as simple as adding another item to the inputs list that we pass to gr.Interface:

\n\n
gr.Interface(\n    predict,\n    inputs=[\n        gr.Slider(0, 1000, label='Seed', default=42),\n        gr.Slider(4, 64, label='Number of Punks', step=1, default=10), # Adding another slider!\n    ],\n    outputs=\"image\",\n).launch()\n
\n\n

The new input will be passed to our predict() function, so we have to make some changes to that function to accept a new parameter:

\n\n
def predict(seed, num_punks):\n    torch.manual_seed(seed)\n    z = torch.randn(num_punks, 100, 1, 1)\n    punks = model(z)\n    save_image(punks, \"punks.png\", normalize=True)\n    return 'punks.png'\n
\n\n

When you relaunch your interface, you should see a second slider that'll let you control the number of punks!

\n\n

Step 5 - Polishing it up

\n\n

Your Gradio app is pretty much good to go, but you can add a few extra things to really make it ready for the spotlight \u2728

\n\n

We can add some examples that users can easily try out by adding this to the gr.Interface:

\n\n
gr.Interface(\n    # ...\n    # keep everything as it is, and then add\n    examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],\n).launch(cache_examples=True) # cache_examples is optional\n
\n\n

The examples parameter takes a list of lists, where each item in the sublists is ordered in the same order that we've listed the inputs. So in our case, [seed, num_punks]. Give it a try!

\n\n

You can also try adding a title, description, and article to the gr.Interface. Each of those parameters accepts a string, so try it out and see what happens \ud83d\udc40 article will also accept HTML, as explored in a previous guide!

\n\n
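As a quick, hedged illustration of those three parameters, the sketch below mirrors the interface above with placeholder title, description, and article strings invented for this example (and a stubbed predict so the snippet stands on its own):

```python
import gradio as gr

# Stub of the predict function defined earlier in this guide, included only so
# this snippet runs on its own.
def predict(seed, num_punks):
    return "punks.png"

gr.Interface(
    predict,
    inputs=[
        gr.Slider(0, 1000, label='Seed', default=42),
        gr.Slider(4, 64, label='Number of Punks', step=1, default=10),
    ],
    outputs="image",
    title="CryptoPunks GAN",                                        # placeholder title
    description="Generate your own procedurally generated punks!",  # placeholder description
    article="<p>Powered by a pretrained GAN. See the guide for details.</p>",  # article accepts HTML
).launch()
```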

When you're all done, you may end up with something like this:

\n\n\n\n

For reference, here is our full code:

\n\n
import torch\nfrom torch import nn\nfrom huggingface_hub import hf_hub_download\nfrom torchvision.utils import save_image\nimport gradio as gr\n\nclass Generator(nn.Module):\n    # Refer to the link below for explanations about nc, nz, and ngf\n    # https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html#inputs\n    def __init__(self, nc=4, nz=100, ngf=64):\n        super(Generator, self).__init__()\n        self.network = nn.Sequential(\n            nn.ConvTranspose2d(nz, ngf * 4, 3, 1, 0, bias=False),\n            nn.BatchNorm2d(ngf * 4),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False),\n            nn.BatchNorm2d(ngf * 2),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 0, bias=False),\n            nn.BatchNorm2d(ngf),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n            nn.Tanh(),\n        )\n\n    def forward(self, input):\n        output = self.network(input)\n        return output\n\nmodel = Generator()\nweights_path = hf_hub_download('nateraw/cryptopunks-gan', 'generator.pth')\nmodel.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))) # Use 'cuda' if you have a GPU available\n\ndef predict(seed, num_punks):\n    torch.manual_seed(seed)\n    z = torch.randn(num_punks, 100, 1, 1)\n    punks = model(z)\n    save_image(punks, \"punks.png\", normalize=True)\n    return 'punks.png'\n\ngr.Interface(\n    predict,\n    inputs=[\n        gr.Slider(0, 1000, label='Seed', default=42),\n        gr.Slider(4, 64, label='Number of Punks', step=1, default=10),\n    ],\n    outputs=\"image\",\n    examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],\n).launch(cache_examples=True)\n
\n\n
\n\n

Congratulations! You've built out your very own GAN-powered CryptoPunks generator, with a fancy Gradio interface that makes it easy for anyone to use. Now you can scour the Hub for more GANs (or train your own) and continue making even more awesome demos \ud83e\udd17

\n", "tags": ["GAN", "IMAGE", "HUB"], "spaces": ["https://huggingface.co/spaces/NimaBoscarino/cryptopunks", "https://huggingface.co/spaces/nateraw/cryptopunks-generator"], "url": "/guides/create-your-own-friends-with-a-gan/", "contributor": "Nima Boscarino and Nate Raw"}, {"name": "creating-a-new-component", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 35, "pretty_name": "Creating A New Component", "content": "# How to Create a New Component\n\n## Introduction\n\nThe purpose of this guide is to illustrate how to add a new component, which you can use in your Gradio applications. The guide will be complemented by code snippets showing step by step how the [ColorPicker](https://gradio.app/docs/#colorpicker) component was added.\n\n## Prerequisites\n\nMake sure you have followed the [CONTRIBUTING.md](https://github.com/gradio-app/gradio/blob/main/CONTRIBUTING.md) guide in order to setup your local development environment (both client and server side).\n\nHere's how to create a new component on Gradio:\n\n1. [Create a New Python Class and Import it](#1-create-a-new-python-class-and-import-it)\n2. [Create a New Svelte Component](#2-create-a-new-svelte-component)\n3. [Create a New Demo](#3-create-a-new-demo)\n\n## 1. Create a New Python Class and Import it\n\nThe first thing to do is to create a new class within the [components.py](https://github.com/gradio-app/gradio/blob/main/gradio/components.py) file. This Python class should inherit from a list of base components and should be placed within the file in the correct section with respect to the type of component you want to add (e.g. input, output or static components).\nIn general, it is advisable to take an existing component as a reference (e.g. [TextBox](https://github.com/gradio-app/gradio/blob/main/gradio/components.py#L290)), copy its code as a skeleton and then adapt it to the case at hand.\n\nLet's take a look at the class added to the [components.py](https://github.com/gradio-app/gradio/blob/main/gradio/components.py) file for the ColorPicker component:\n\n```python\n@document()\nclass ColorPicker(Changeable, Submittable, IOComponent):\n \"\"\"\n Creates a color picker for user to select a color as string input.\n Preprocessing: passes selected color value as a {str} into the function.\n Postprocessing: expects a {str} returned from function and sets color picker value to it.\n Examples-format: a {str} with a hexadecimal representation of a color, e.g. \"#ff0000\" for red.\n Demos: color_picker, color_generator\n \"\"\"\n\n def __init__(\n self,\n value: str = None,\n *,\n label: Optional[str] = None,\n show_label: bool = True,\n interactive: Optional[bool] = None,\n visible: bool = True,\n elem_id: Optional[str] = None,\n **kwargs,\n ):\n \"\"\"\n Parameters:\n value: default text to provide in color picker.\n label: component name in interface.\n show_label: if True, will display label.\n interactive: if True, will be rendered as an editable color picker; if False, editing will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.\n visible: If False, component will be hidden.\n elem_id: An optional string that is assigned as the id of this component in the HTML DOM. 
Can be used for targeting CSS styles.\n \"\"\"\n self.value = self.postprocess(value)\n self.cleared_value = \"#000000\"\n self.test_input = value\n IOComponent.__init__(\n self,\n label=label,\n show_label=show_label,\n interactive=interactive,\n visible=visible,\n elem_id=elem_id,\n **kwargs,\n )\n\n def get_config(self):\n return {\n \"value\": self.value,\n **IOComponent.get_config(self),\n }\n\n @staticmethod\n def update(\n value: Optional[Any] = None,\n label: Optional[str] = None,\n show_label: Optional[bool] = None,\n visible: Optional[bool] = None,\n interactive: Optional[bool] = None,\n ):\n return {\n \"value\": value,\n \"label\": label,\n \"show_label\": show_label,\n \"visible\": visible,\n \"interactive\": interactive,\n \"__type__\": \"update\",\n }\n\n # Input Functionalities\n def preprocess(self, x: str | None) -> Any:\n \"\"\"\n Any preprocessing needed to be performed on function input.\n Parameters:\n x (str): text\n Returns:\n (str): text\n \"\"\"\n if x is None:\n return None\n else:\n return str(x)\n\n def preprocess_example(self, x: str | None) -> Any:\n \"\"\"\n Any preprocessing needed to be performed on an example before being passed to the main function.\n \"\"\"\n if x is None:\n return None\n else:\n return str(x)\n\n # Output Functionalities\n def postprocess(self, y: str | None):\n \"\"\"\n Any postprocessing needed to be performed on function output.\n Parameters:\n y (str | None): text\n Returns:\n (str | None): text\n \"\"\"\n if y is None:\n return None\n else:\n return str(y)\n\n def deserialize(self, x):\n \"\"\"\n Convert from serialized output (e.g. base64 representation) from a call() to the interface to a human-readable version of the output (path of an image, etc.)\n \"\"\"\n return x\n```\n\nOnce defined, it is necessary to import the new class inside the [\\_\\_init\\_\\_](https://github.com/gradio-app/gradio/blob/main/gradio/__init__.py) module class in order to make it module visible.\n\n```python\n\nfrom gradio.components import (\n ...\n ColorPicker,\n ...\n)\n\n```\n\n### 1.1 Writing Unit Test for Python Class\n\nWhen developing new components, you should also write a suite of unit tests for it. The tests should be placed in the [gradio/test/test_components.py](https://github.com/gradio-app/gradio/blob/main/test/test_components.py) file. Again, as above, take a cue from the tests of other components (e.g. [Textbox](https://github.com/gradio-app/gradio/blob/main/test/test_components.py)) and add as many unit tests as you think are appropriate to test all the different aspects and functionalities of the new component. 
For example, the following tests were added for the ColorPicker component:\n\n```python\nclass TestColorPicker(unittest.TestCase):\n def test_component_functions(self):\n \"\"\"\n Preprocess, postprocess, serialize, save_flagged, restore_flagged, tokenize, get_config\n \"\"\"\n color_picker_input = gr.ColorPicker()\n self.assertEqual(color_picker_input.preprocess(\"#000000\"), \"#000000\")\n self.assertEqual(color_picker_input.preprocess_example(\"#000000\"), \"#000000\")\n self.assertEqual(color_picker_input.postprocess(None), None)\n self.assertEqual(color_picker_input.postprocess(\"#FFFFFF\"), \"#FFFFFF\")\n self.assertEqual(color_picker_input.serialize(\"#000000\", True), \"#000000\")\n\n color_picker_input.interpretation_replacement = \"unknown\"\n\n self.assertEqual(\n color_picker_input.get_config(),\n {\n \"value\": None,\n \"show_label\": True,\n \"label\": None,\n \"style\": {},\n \"elem_id\": None,\n \"visible\": True,\n \"interactive\": None,\n \"name\": \"colorpicker\",\n },\n )\n\n def test_in_interface_as_input(self):\n \"\"\"\n Interface, process, interpret,\n \"\"\"\n iface = gr.Interface(lambda x: x, \"colorpicker\", \"colorpicker\")\n self.assertEqual(iface.process([\"#000000\"]), [\"#000000\"])\n\n def test_in_interface_as_output(self):\n \"\"\"\n Interface, process\n\n \"\"\"\n iface = gr.Interface(lambda x: x, \"colorpicker\", gr.ColorPicker())\n self.assertEqual(iface.process([\"#000000\"]), [\"#000000\"])\n\n def test_static(self):\n \"\"\"\n postprocess\n \"\"\"\n component = gr.ColorPicker(\"#000000\")\n self.assertEqual(component.get_config().get(\"value\"), \"#000000\")\n```\n\n## 2. Create a New Svelte Component\n\nLet's see the steps you need to follow to create the frontend of your new component and to map it to its python code:\n\n- Create a new UI-side Svelte component and figure out where to place it. The options are: create a package for the new component in the [js folder](https://github.com/gradio-app/gradio/tree/main/js/), if this is completely different from existing components or add the new component to an existing package, such as to the [form package](https://github.com/gradio-app/gradio/tree/main/js/form). The ColorPicker component for example, was included in the form package because it is similar to components that already exist.\n- Create a file with an appropriate name in the src folder of the package where you placed the Svelte component, note: the name must start with a capital letter. This is the 'core' component and it's the generic component that has no knowledge of Gradio specific functionality. Initially add any text/html to this file so that the component renders something. The Svelte application code for the ColorPicker looks like this:\n\n```typescript\n\n\n\n\n```\n\n- Export this file inside the index.ts file of the package where you placed the Svelte component by doing `export { default as FileName } from \"./FileName.svelte\"`. The ColorPicker file is exported in the [index.ts](https://github.com/gradio-app/gradio/blob/main/js/form/src/index.ts) file and the export is performed by doing: `export { default as ColorPicker } from \"./ColorPicker.svelte\";`.\n- Create the Gradio specific component in [js/app/src/components](https://github.com/gradio-app/gradio/tree/main/js/app/src/components). This is a Gradio wrapper that handles the specific logic of the library, passes the necessary data down to the core component and attaches any necessary event listeners. 
Copy the folder of another component, rename it and edit the code inside it, keeping the structure.\n\nHere you will have three files, the first file is for the Svelte application, and it will look like this:\n\n```typescript\n\n\n\n\n\n\t\n\n\t\n\n```\n\nThe second one contains the tests for the frontend, for example for the ColorPicker component:\n\n```typescript\nimport { test, describe, assert, afterEach } from \"vitest\";\nimport { cleanup, render } from \"@gradio/tootils\";\n\nimport ColorPicker from \"./ColorPicker.svelte\";\nimport type { LoadingStatus } from \"../StatusTracker/types\";\n\nconst loading_status = {\n\teta: 0,\n\tqueue_position: 1,\n\tstatus: \"complete\" as LoadingStatus[\"status\"],\n\tscroll_to_output: false,\n\tvisible: true,\n\tfn_index: 0\n};\n\ndescribe(\"ColorPicker\", () => {\n\tafterEach(() => cleanup());\n\n\ttest(\"renders provided value\", () => {\n\t\tconst { getByDisplayValue } = render(ColorPicker, {\n\t\t\tloading_status,\n\t\t\tshow_label: true,\n\t\t\tmode: \"dynamic\",\n\t\t\tvalue: \"#000000\",\n\t\t\tlabel: \"ColorPicker\"\n\t\t});\n\n\t\tconst item: HTMLInputElement = getByDisplayValue(\"#000000\");\n\t\tassert.equal(item.value, \"#000000\");\n\t});\n\n\ttest(\"changing the color should update the value\", async () => {\n\t\tconst { component, getByDisplayValue } = render(ColorPicker, {\n\t\t\tloading_status,\n\t\t\tshow_label: true,\n\t\t\tmode: \"dynamic\",\n\t\t\tvalue: \"#000000\",\n\t\t\tlabel: \"ColorPicker\"\n\t\t});\n\n\t\tconst item: HTMLInputElement = getByDisplayValue(\"#000000\");\n\n\t\tassert.equal(item.value, \"#000000\");\n\n\t\tawait component.$set({\n\t\t\tvalue: \"#FFFFFF\"\n\t\t});\n\n\t\tassert.equal(component.value, \"#FFFFFF\");\n\t});\n});\n```\n\nThe third one is the index.ts file:\n\n```typescript\nexport { default as Component } from \"./ColorPicker.svelte\";\nexport const modes = [\"static\", \"dynamic\"];\n```\n\n- Add the mapping for your component in the [directory.ts file](https://github.com/gradio-app/gradio/blob/main/js/app/src/components/directory.ts). To do this, copy and paste the mapping line of any component and edit its text. The key name must be the lowercase version of the actual component name in the Python library. So for example, for the ColorPicker component the mapping looks like this:\n\n```typescript\nexport const component_map = {\n...\ncolorpicker: () => import(\"./ColorPicker\"),\n...\n}\n```\n\n### 2.1 Writing Unit Test for Svelte Component\n\nWhen developing new components, you should also write a suite of unit tests for it. The tests should be placed in the new component's folder in a file named MyAwesomeComponent.test.ts. Again, as above, take a cue from the tests of other components (e.g. [Textbox.test.ts](https://github.com/gradio-app/gradio/blob/main/js/app/src/components/Textbox/Textbox.test.ts)) and add as many unit tests as you think are appropriate to test all the different aspects and functionalities of the new component.\n\n### 3. Create a New Demo\n\nThe last step is to create a demo in the [gradio/demo folder](https://github.com/gradio-app/gradio/tree/main/demo), which will use the newly added component. Again, the suggestion is to reference an existing demo. Write the code for the demo in a file called run.py, add the necessary requirements and an image showing the application interface. 
Finally add a gif showing its usage.\nYou can take a look at the [demo](https://github.com/gradio-app/gradio/tree/main/demo/color_picker) created for the ColorPicker, where an icon and a color selected through the new component is taken as input, and the same icon colored with the selected color is returned as output.\n\nTo test the application:\n\n- run on a terminal `python path/demo/run.py` which starts the backend at the address [http://localhost:7860](http://localhost:7860);\n- in another terminal, run `pnpm dev` to start the frontend at [http://localhost:9876](http://localhost:9876) with hot reload functionalities.\n\n## Conclusion\n\nIn this guide, we have shown how simple it is to add a new component to Gradio, seeing step by step how the ColorPicker component was added. For further details, you can refer to PR: [#1695](https://github.com/gradio-app/gradio/pull/1695).\n", "html": "

How to Create a New Component

\n\n

Introduction

\n\n

The purpose of this guide is to illustrate how to add a new component, which you can use in your Gradio applications. The guide will be complemented by code snippets showing step by step how the ColorPicker component was added.

\n\n

Prerequisites

\n\n

Make sure you have followed the CONTRIBUTING.md guide in order to set up your local development environment (both client and server side).

\n\n

Here's how to create a new component on Gradio:

\n\n
  1. Create a New Python Class and Import it
  2. Create a New Svelte Component
  3. Create a New Demo
\n\n

1. Create a New Python Class and Import it

\n\n

The first thing to do is to create a new class within the components.py file. This Python class should inherit from a list of base components and should be placed within the file in the correct section with respect to the type of component you want to add (e.g. input, output or static components).\nIn general, it is advisable to take an existing component as a reference (e.g. Textbox), copy its code as a skeleton and then adapt it to the case at hand.

\n\n

Let's take a look at the class added to the components.py file for the ColorPicker component:

\n\n
@document()\nclass ColorPicker(Changeable, Submittable, IOComponent):\n    \"\"\"\n    Creates a color picker for user to select a color as string input.\n    Preprocessing: passes selected color value as a {str} into the function.\n    Postprocessing: expects a {str} returned from function and sets color picker value to it.\n    Examples-format: a {str} with a hexadecimal representation of a color, e.g. \"#ff0000\" for red.\n    Demos: color_picker, color_generator\n    \"\"\"\n\n    def __init__(\n        self,\n        value: str = None,\n        *,\n        label: Optional[str] = None,\n        show_label: bool = True,\n        interactive: Optional[bool] = None,\n        visible: bool = True,\n        elem_id: Optional[str] = None,\n        **kwargs,\n    ):\n        \"\"\"\n        Parameters:\n            value: default text to provide in color picker.\n            label: component name in interface.\n            show_label: if True, will display label.\n            interactive: if True, will be rendered as an editable color picker; if False, editing will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.\n            visible: If False, component will be hidden.\n            elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.\n        \"\"\"\n        self.value = self.postprocess(value)\n        self.cleared_value = \"#000000\"\n        self.test_input = value\n        IOComponent.__init__(\n            self,\n            label=label,\n            show_label=show_label,\n            interactive=interactive,\n            visible=visible,\n            elem_id=elem_id,\n            **kwargs,\n        )\n\n    def get_config(self):\n        return {\n            \"value\": self.value,\n            **IOComponent.get_config(self),\n        }\n\n    @staticmethod\n    def update(\n        value: Optional[Any] = None,\n        label: Optional[str] = None,\n        show_label: Optional[bool] = None,\n        visible: Optional[bool] = None,\n        interactive: Optional[bool] = None,\n    ):\n        return {\n            \"value\": value,\n            \"label\": label,\n            \"show_label\": show_label,\n            \"visible\": visible,\n            \"interactive\": interactive,\n            \"__type__\": \"update\",\n        }\n\n    # Input Functionalities\n    def preprocess(self, x: str | None) -> Any:\n        \"\"\"\n        Any preprocessing needed to be performed on function input.\n        Parameters:\n        x (str): text\n        Returns:\n        (str): text\n        \"\"\"\n        if x is None:\n            return None\n        else:\n            return str(x)\n\n    def preprocess_example(self, x: str | None) -> Any:\n        \"\"\"\n        Any preprocessing needed to be performed on an example before being passed to the main function.\n        \"\"\"\n        if x is None:\n            return None\n        else:\n            return str(x)\n\n    # Output Functionalities\n    def postprocess(self, y: str | None):\n        \"\"\"\n        Any postprocessing needed to be performed on function output.\n        Parameters:\n        y (str | None): text\n        Returns:\n        (str | None): text\n        \"\"\"\n        if y is None:\n            return None\n        else:\n            return str(y)\n\n    def deserialize(self, x):\n        \"\"\"\n        Convert from serialized output (e.g. 
base64 representation) from a call() to the interface to a human-readable version of the output (path of an image, etc.)\n        \"\"\"\n        return x\n
\n\n

Once defined, it is necessary to import the new class in the __init__ module so that it is visible at the module level.

\n\n
\nfrom gradio.components import (\n    ...\n    ColorPicker,\n    ...\n)\n\n
\n\n

1.1 Writing Unit Test for Python Class

\n\n

When developing a new component, you should also write a suite of unit tests for it. The tests should be placed in the gradio/test/test_components.py file. Again, as above, take a cue from the tests of other components (e.g. Textbox) and add as many unit tests as you think are appropriate to test all the different aspects and functionalities of the new component. For example, the following tests were added for the ColorPicker component:

\n\n
class TestColorPicker(unittest.TestCase):\n    def test_component_functions(self):\n        \"\"\"\n        Preprocess, postprocess, serialize, save_flagged, restore_flagged, tokenize, get_config\n        \"\"\"\n        color_picker_input = gr.ColorPicker()\n        self.assertEqual(color_picker_input.preprocess(\"#000000\"), \"#000000\")\n        self.assertEqual(color_picker_input.preprocess_example(\"#000000\"), \"#000000\")\n        self.assertEqual(color_picker_input.postprocess(None), None)\n        self.assertEqual(color_picker_input.postprocess(\"#FFFFFF\"), \"#FFFFFF\")\n        self.assertEqual(color_picker_input.serialize(\"#000000\", True), \"#000000\")\n\n        color_picker_input.interpretation_replacement = \"unknown\"\n\n        self.assertEqual(\n            color_picker_input.get_config(),\n            {\n                \"value\": None,\n                \"show_label\": True,\n                \"label\": None,\n                \"style\": {},\n                \"elem_id\": None,\n                \"visible\": True,\n                \"interactive\": None,\n                \"name\": \"colorpicker\",\n            },\n        )\n\n    def test_in_interface_as_input(self):\n        \"\"\"\n        Interface, process, interpret,\n        \"\"\"\n        iface = gr.Interface(lambda x: x, \"colorpicker\", \"colorpicker\")\n        self.assertEqual(iface.process([\"#000000\"]), [\"#000000\"])\n\n    def test_in_interface_as_output(self):\n        \"\"\"\n        Interface, process\n\n        \"\"\"\n        iface = gr.Interface(lambda x: x, \"colorpicker\", gr.ColorPicker())\n        self.assertEqual(iface.process([\"#000000\"]), [\"#000000\"])\n\n    def test_static(self):\n        \"\"\"\n        postprocess\n        \"\"\"\n        component = gr.ColorPicker(\"#000000\")\n        self.assertEqual(component.get_config().get(\"value\"), \"#000000\")\n
\n\n

2. Create a New Svelte Component

\n\n

Let's see the steps you need to follow to create the frontend of your new component and to map it to its python code:

\n\n
  • Create a new UI-side Svelte component and figure out where to place it. The options are: create a package for the new component in the js folder if it is completely different from existing components, or add the new component to an existing package, such as the form package. The ColorPicker component, for example, was included in the form package because it is similar to components that already exist.
  • Create a file with an appropriate name in the src folder of the package where you placed the Svelte component; note that the name must start with a capital letter. This is the 'core' component: the generic component that has no knowledge of Gradio-specific functionality. Initially add any text/html to this file so that the component renders something. The Svelte application code for the ColorPicker looks like this:
\n\n
\n\n\n\n
\n\n
  • Export this file inside the index.ts file of the package where you placed the Svelte component by doing export { default as FileName } from \"./FileName.svelte\". The ColorPicker file is exported in the index.ts file and the export is performed by doing: export { default as ColorPicker } from \"./ColorPicker.svelte\";.
  • Create the Gradio specific component in js/app/src/components. This is a Gradio wrapper that handles the specific logic of the library, passes the necessary data down to the core component and attaches any necessary event listeners. Copy the folder of another component, rename it and edit the code inside it, keeping the structure.
\n\n

Here you will have three files, the first file is for the Svelte application, and it will look like this:

\n\n
\n\n

The second one contains the tests for the frontend, for example for the ColorPicker component:

\n\n
import { test, describe, assert, afterEach } from \"vitest\";\nimport { cleanup, render } from \"@gradio/tootils\";\n\nimport ColorPicker from \"./ColorPicker.svelte\";\nimport type { LoadingStatus } from \"../StatusTracker/types\";\n\nconst loading_status = {\n    eta: 0,\n    queue_position: 1,\n    status: \"complete\" as LoadingStatus[\"status\"],\n    scroll_to_output: false,\n    visible: true,\n    fn_index: 0\n};\n\ndescribe(\"ColorPicker\", () => {\n    afterEach(() => cleanup());\n\n    test(\"renders provided value\", () => {\n        const { getByDisplayValue } = render(ColorPicker, {\n            loading_status,\n            show_label: true,\n            mode: \"dynamic\",\n            value: \"#000000\",\n            label: \"ColorPicker\"\n        });\n\n        const item: HTMLInputElement = getByDisplayValue(\"#000000\");\n        assert.equal(item.value, \"#000000\");\n    });\n\n    test(\"changing the color should update the value\", async () => {\n        const { component, getByDisplayValue } = render(ColorPicker, {\n            loading_status,\n            show_label: true,\n            mode: \"dynamic\",\n            value: \"#000000\",\n            label: \"ColorPicker\"\n        });\n\n        const item: HTMLInputElement = getByDisplayValue(\"#000000\");\n\n        assert.equal(item.value, \"#000000\");\n\n        await component.$set({\n            value: \"#FFFFFF\"\n        });\n\n        assert.equal(component.value, \"#FFFFFF\");\n    });\n});\n
\n\n

The third one is the index.ts file:

\n\n
export { default as Component } from \"./ColorPicker.svelte\";\nexport const modes = [\"static\", \"dynamic\"];\n
\n\n
  • Add the mapping for your component in the directory.ts file. To do this, copy and paste the mapping line of any component and edit its text. The key name must be the lowercase version of the actual component name in the Python library. So for example, for the ColorPicker component the mapping looks like this:
\n\n
export const component_map = {\n...\ncolorpicker: () => import(\"./ColorPicker\"),\n...\n}\n
\n\n

2.1 Writing Unit Test for Svelte Component

\n\n

When developing a new component, you should also write a suite of unit tests for it. The tests should be placed in the new component's folder in a file named MyAwesomeComponent.test.ts. Again, as above, take a cue from the tests of other components (e.g. Textbox.test.ts) and add as many unit tests as you think are appropriate to test all the different aspects and functionalities of the new component.

\n\n

3. Create a New Demo

\n\n

The last step is to create a demo in the gradio/demo folder, which will use the newly added component. Again, the suggestion is to reference an existing demo. Write the code for the demo in a file called run.py, add the necessary requirements and an image showing the application interface. Finally, add a GIF showing its usage.\nYou can take a look at the demo created for the ColorPicker, where an icon and a color selected through the new component are taken as input, and the same icon colored with the selected color is returned as output.

\n\n
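As a rough, illustrative sketch only (not the actual color_picker demo referenced above), a minimal run.py for a new component might look something like this:

```python
import gradio as gr

# Minimal illustrative demo: echo back whichever color the user picks.
def echo_color(color: str) -> str:
    return color

demo = gr.Interface(
    fn=echo_color,
    inputs=gr.ColorPicker(label="Pick a color"),
    outputs=gr.ColorPicker(label="Selected color"),
)

if __name__ == "__main__":
    demo.launch()
```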

To test the application:

\n\n
  • run python path/demo/run.py in a terminal, which starts the backend at http://localhost:7860;
  • in another terminal, run pnpm dev to start the frontend at http://localhost:9876 with hot-reload functionality.
\n\n

Conclusion

\n\n

In this guide, we have shown how simple it is to add a new component to Gradio, seeing step by step how the ColorPicker component was added. For further details, you can refer to PR: #1695.

\n", "tags": [], "spaces": [], "url": "/guides/creating-a-new-component/", "contributor": null}, {"name": "custom-interpretations-with-blocks", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 36, "pretty_name": "Custom Interpretations With Blocks", "content": "# Custom Machine Learning Interpretations with Blocks\n\n\n**Prerequisite**: This Guide requires you to know about Blocks and the interpretation feature of Interfaces.\nMake sure to [read the Guide to Blocks first](https://gradio.app/guides/quickstart/#blocks-more-flexibility-and-control) as well as the\ninterpretation section of the [Advanced Interface Features Guide](/advanced-interface-features#interpreting-your-predictions).\n\n## Introduction\n\nIf you have experience working with the Interface class, then you know that interpreting the prediction of your machine learning model\nis as easy as setting the `interpretation` parameter to either \"default\" or \"shap\".\n\nYou may be wondering if it is possible to add the same interpretation functionality to an app built with the Blocks API.\nNot only is it possible, but the flexibility of Blocks lets you display the interpretation output in ways that are\nimpossible to do with Interfaces!\n\nThis guide will show how to:\n\n1. Recreate the behavior of Interfaces's interpretation feature in a Blocks app.\n2. Customize how interpretations are displayed in a Blocks app.\n\nLet's get started!\n\n## Setting up the Blocks app\n\nLet's build a sentiment classification app with the Blocks API.\nThis app will take text as input and output the probability that this text expresses either negative or positive sentiment.\nWe'll have a single input `Textbox` and a single output `Label` component.\nBelow is the code for the app as well as the app itself.\n\n```python\nimport gradio as gr \nfrom transformers import pipeline\n\nsentiment_classifier = pipeline(\"text-classification\", return_all_scores=True)\n\ndef classifier(text):\n pred = sentiment_classifier(text)\n return {p[\"label\"]: p[\"score\"] for p in pred[0]}\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n input_text = gr.Textbox(label=\"Input Text\")\n with gr.Row():\n classify = gr.Button(\"Classify Sentiment\")\n with gr.Column():\n label = gr.Label(label=\"Predicted Sentiment\")\n\n classify.click(classifier, input_text, label)\ndemo.launch()\n```\n\n \n\n## Adding interpretations to the app\n\nOur goal is to present to our users how the words in the input contribute to the model's prediction.\nThis will help our users understand how the model works and also evaluate its effectiveness.\nFor example, we should expect our model to identify the words \"happy\" and \"love\" with positive sentiment - if not it's a sign we made a mistake in training it!\n\nFor each word in the input, we will compute a score of how much the model's prediction of positive sentiment is changed by that word.\nOnce we have those `(word, score)` pairs we can use gradio to visualize them for the user.\n\nThe [shap](https://shap.readthedocs.io/en/stable/index.html) library will help us compute the `(word, score)` pairs and\ngradio will take care of displaying the output to the user.\n\nThe following code computes the `(word, score)` pairs:\n\n```python\ndef interpretation_function(text):\n explainer = shap.Explainer(sentiment_classifier)\n shap_values = explainer([text])\n \n # Dimensions are (batch size, text size, number of classes)\n # Since we care about positive sentiment, use 
index 1\n scores = list(zip(shap_values.data[0], shap_values.values[0, :, 1]))\n # Scores contains (word, score) pairs\n \n \n # Format expected by gr.components.Interpretation\n return {\"original\": text, \"interpretation\": scores}\n```\n\nNow, all we have to do is add a button that runs this function when clicked.\nTo display the interpretation, we will use `gr.components.Interpretation`.\nThis will color each word in the input either red or blue.\nRed if it contributes to positive sentiment and blue if it contributes to negative sentiment.\nThis is how `Interface` displays the interpretation output for text.\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n input_text = gr.Textbox(label=\"Input Text\")\n with gr.Row():\n classify = gr.Button(\"Classify Sentiment\")\n interpret = gr.Button(\"Interpret\")\n with gr.Column():\n label = gr.Label(label=\"Predicted Sentiment\")\n with gr.Column():\n interpretation = gr.components.Interpretation(input_text)\n classify.click(classifier, input_text, label)\n interpret.click(interpretation_function, input_text, interpretation)\n\ndemo.launch()\n```\n\n \n\n\n## Customizing how the interpretation is displayed\n\nThe `gr.components.Interpretation` component does a good job of showing how individual words contribute to the sentiment prediction,\nbut what if we also wanted to display the score themselves along with the words?\n\nOne way to do this would be to generate a bar plot where the words are on the horizontal axis and the bar height corresponds\nto the shap score.\n\nWe can do this by modifying our `interpretation_function` to additionally return a matplotlib bar plot.\nWe will display it with the `gr.Plot` component in a separate tab.\n\nThis is how the interpretation function will look:\n```python\ndef interpretation_function(text):\n explainer = shap.Explainer(sentiment_classifier)\n shap_values = explainer([text])\n # Dimensions are (batch size, text size, number of classes)\n # Since we care about positive sentiment, use index 1\n scores = list(zip(shap_values.data[0], shap_values.values[0, :, 1]))\n\n scores_desc = sorted(scores, key=lambda t: t[1])[::-1]\n\n # Filter out empty string added by shap\n scores_desc = [t for t in scores_desc if t[0] != \"\"]\n\n fig_m = plt.figure()\n \n # Select top 5 words that contribute to positive sentiment\n plt.bar(x=[s[0] for s in scores_desc[:5]],\n height=[s[1] for s in scores_desc[:5]])\n plt.title(\"Top words contributing to positive sentiment\")\n plt.ylabel(\"Shap Value\")\n plt.xlabel(\"Word\")\n return {\"original\": text, \"interpretation\": scores}, fig_m\n```\n\nAnd this is how the app code will look:\n```python\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n input_text = gr.Textbox(label=\"Input Text\")\n with gr.Row():\n classify = gr.Button(\"Classify Sentiment\")\n interpret = gr.Button(\"Interpret\")\n with gr.Column():\n label = gr.Label(label=\"Predicted Sentiment\")\n with gr.Column():\n with gr.Tabs():\n with gr.TabItem(\"Display interpretation with built-in component\"):\n interpretation = gr.components.Interpretation(input_text)\n with gr.TabItem(\"Display interpretation with plot\"):\n interpretation_plot = gr.Plot()\n\n classify.click(classifier, input_text, label)\n interpret.click(interpretation_function, input_text, [interpretation, interpretation_plot])\n\ndemo.launch()\n```\n\nYou can see the demo below!\n\n \n\n## Beyond Sentiment Classification\nAlthough we have focused on sentiment classification so far, you can add 
interpretations to almost any machine learning model.\nThe output must be an `gr.Image` or `gr.Label` but the input can be almost anything (`gr.Number`, `gr.Slider`, `gr.Radio`, `gr.Image`).\n\nHere is a demo built with blocks of interpretations for an image classification model:\n\n \n\n\n## Closing remarks\n\nWe did a deep dive \ud83e\udd3f into how interpretations work and how you can add them to your Blocks app.\n\nWe also showed how the Blocks API gives you the power to control how the interpretation is visualized in your app.\n\nAdding interpretations is a helpful way to make your users understand and gain trust in your model.\nNow you have all the tools you need to add them to all of your apps!\n", "html": "

Custom Machine Learning Interpretations with Blocks

\n\n

Prerequisite: This Guide requires you to know about Blocks and the interpretation feature of Interfaces.\nMake sure to read the Guide to Blocks first as well as the\ninterpretation section of the Advanced Interface Features Guide.

\n\n

Introduction

\n\n

If you have experience working with the Interface class, then you know that interpreting the prediction of your machine learning model\nis as easy as setting the interpretation parameter to either \"default\" or \"shap\".

\n\n
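
As a reminder of what that looks like at the Interface level, here is a minimal sketch (the toy classifier and its hard-coded scores are purely illustrative):

\n\n
import gradio as gr\n\n# Toy classifier used only for illustration; a real app would call an actual model\ndef classifier(text):\n    score = 0.9 if \"happy\" in text.lower() else 0.2\n    return {\"POSITIVE\": score, \"NEGATIVE\": 1 - score}\n\n# Setting interpretation=\"default\" asks Interface to compute word-level interpretations for you\ndemo = gr.Interface(fn=classifier, inputs=\"text\", outputs=\"label\", interpretation=\"default\")\n\ndemo.launch()\n
\n\n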

You may be wondering if it is possible to add the same interpretation functionality to an app built with the Blocks API.\nNot only is it possible, but the flexibility of Blocks lets you display the interpretation output in ways that are\nimpossible to do with Interfaces!

\n\n

This guide will show how to:

\n\n
  1. Recreate the behavior of Interface's interpretation feature in a Blocks app.
  2. Customize how interpretations are displayed in a Blocks app.
\n\n

Let's get started!

\n\n

Setting up the Blocks app

\n\n

Let's build a sentiment classification app with the Blocks API.\nThis app will take text as input and output the probability that this text expresses either negative or positive sentiment.\nWe'll have a single input Textbox and a single output Label component.\nBelow is the code for the app as well as the app itself.

\n\n
import gradio as gr \nfrom transformers import pipeline\n\nsentiment_classifier = pipeline(\"text-classification\", return_all_scores=True)\n\ndef classifier(text):\n    pred = sentiment_classifier(text)\n    return {p[\"label\"]: p[\"score\"] for p in pred[0]}\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        with gr.Column():\n            input_text = gr.Textbox(label=\"Input Text\")\n            with gr.Row():\n                classify = gr.Button(\"Classify Sentiment\")\n        with gr.Column():\n            label = gr.Label(label=\"Predicted Sentiment\")\n\n    classify.click(classifier, input_text, label)\ndemo.launch()\n
\n\n

\n\n

Adding interpretations to the app

\n\n

Our goal is to present to our users how the words in the input contribute to the model's prediction.\nThis will help our users understand how the model works and also evaluate its effectiveness.\nFor example, we should expect our model to identify the words \"happy\" and \"love\" with positive sentiment - if not it's a sign we made a mistake in training it!

\n\n

For each word in the input, we will compute a score of how much the model's prediction of positive sentiment is changed by that word.\nOnce we have those (word, score) pairs we can use gradio to visualize them for the user.

\n\n

The shap library will help us compute the (word, score) pairs and\ngradio will take care of displaying the output to the user.

\n\n

The following code computes the (word, score) pairs:

\n\n
def interpretation_function(text):\n    explainer = shap.Explainer(sentiment_classifier)\n    shap_values = explainer([text])\n\n    # Dimensions are (batch size, text size, number of classes)\n    # Since we care about positive sentiment, use index 1\n    scores = list(zip(shap_values.data[0], shap_values.values[0, :, 1]))\n    # Scores contains (word, score) pairs\n\n\n    # Format expected by gr.components.Interpretation\n    return {\"original\": text, \"interpretation\": scores}\n
\n\n

Now, all we have to do is add a button that runs this function when clicked.\nTo display the interpretation, we will use gr.components.Interpretation.\nThis will color each word in the input either red or blue.\nRed if it contributes to positive sentiment and blue if it contributes to negative sentiment.\nThis is how Interface displays the interpretation output for text.

\n\n
with gr.Blocks() as demo:\n    with gr.Row():\n        with gr.Column():\n            input_text = gr.Textbox(label=\"Input Text\")\n            with gr.Row():\n                classify = gr.Button(\"Classify Sentiment\")\n                interpret = gr.Button(\"Interpret\")\n        with gr.Column():\n            label = gr.Label(label=\"Predicted Sentiment\")\n        with gr.Column():\n            interpretation = gr.components.Interpretation(input_text)\n    classify.click(classifier, input_text, label)\n    interpret.click(interpretation_function, input_text, interpretation)\n\ndemo.launch()\n
\n\n

\n\n

Customizing how the interpretation is displayed

\n\n

The gr.components.Interpretation component does a good job of showing how individual words contribute to the sentiment prediction,\nbut what if we also wanted to display the scores themselves along with the words?

\n\n

One way to do this would be to generate a bar plot where the words are on the horizontal axis and the bar height corresponds\nto the shap score.

\n\n

We can do this by modifying our interpretation_function to additionally return a matplotlib bar plot.\nWe will display it with the gr.Plot component in a separate tab.

\n\n

This is how the interpretation function will look:

\n\n
def interpretation_function(text):\n    explainer = shap.Explainer(sentiment_classifier)\n    shap_values = explainer([text])\n    # Dimensions are (batch size, text size, number of classes)\n    # Since we care about positive sentiment, use index 1\n    scores = list(zip(shap_values.data[0], shap_values.values[0, :, 1]))\n\n    scores_desc = sorted(scores, key=lambda t: t[1])[::-1]\n\n    # Filter out empty string added by shap\n    scores_desc = [t for t in scores_desc if t[0] != \"\"]\n\n    fig_m = plt.figure()\n\n    # Select top 5 words that contribute to positive sentiment\n    plt.bar(x=[s[0] for s in scores_desc[:5]],\n            height=[s[1] for s in scores_desc[:5]])\n    plt.title(\"Top words contributing to positive sentiment\")\n    plt.ylabel(\"Shap Value\")\n    plt.xlabel(\"Word\")\n    return {\"original\": text, \"interpretation\": scores}, fig_m\n
\n\n

And this is how the app code will look:

\n\n
with gr.Blocks() as demo:\n    with gr.Row():\n        with gr.Column():\n            input_text = gr.Textbox(label=\"Input Text\")\n            with gr.Row():\n                classify = gr.Button(\"Classify Sentiment\")\n                interpret = gr.Button(\"Interpret\")\n        with gr.Column():\n            label = gr.Label(label=\"Predicted Sentiment\")\n        with gr.Column():\n            with gr.Tabs():\n                with gr.TabItem(\"Display interpretation with built-in component\"):\n                    interpretation = gr.components.Interpretation(input_text)\n                with gr.TabItem(\"Display interpretation with plot\"):\n                    interpretation_plot = gr.Plot()\n\n    classify.click(classifier, input_text, label)\n    interpret.click(interpretation_function, input_text, [interpretation, interpretation_plot])\n\ndemo.launch()\n
\n\n

You can see the demo below!

\n\n

\n\n

Beyond Sentiment Classification

\n\n

Although we have focused on sentiment classification so far, you can add interpretations to almost any machine learning model.\nThe output must be a gr.Image or gr.Label, but the input can be almost anything (gr.Number, gr.Slider, gr.Radio, gr.Image).

\n\n

Here is a demo, built with Blocks, of interpretations for an image classification model:

\n\n

\n\n

Closing remarks

\n\n

We did a deep dive \ud83e\udd3f into how interpretations work and how you can add them to your Blocks app.

\n\n

We also showed how the Blocks API gives you the power to control how the interpretation is visualized in your app.

\n\n

Adding interpretations is a great way to help your users understand and gain trust in your model.\nNow you have all the tools you need to add them to all of your apps!

\n", "tags": ["INTERPRETATION", "SENTIMENT ANALYSIS"], "spaces": [], "url": "/guides/custom-interpretations-with-blocks/", "contributor": null}, {"name": "developing-faster-with-reload-mode", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 37, "pretty_name": "Developing Faster With Reload Mode", "content": "# Developing Faster with Auto-Reloading\n\n**Prerequisite**: This Guide requires you to know about Blocks. Make sure to [read the Guide to Blocks first](https://gradio.app/guides/quickstart/#blocks-more-flexibility-and-control).\n\nThis guide covers auto reloading, reloading in a Python IDE, and using gradio with Jupyter Notebooks.\n\n## Why Auto-Reloading?\n\nWhen you are building a Gradio demo, particularly out of Blocks, you may find it cumbersome to keep re-running your code to test your changes.\n\nTo make it faster and more convenient to write your code, we've made it easier to \"reload\" your Gradio apps instantly when you are developing in a **Python IDE** (like VS Code, Sublime Text, PyCharm, or so on) or generally running your Python code from the terminal. We've also developed an analogous \"magic command\" that allows you to re-run cells faster if you use **Jupyter Notebooks** (or any similar environment like Colab).\n\nThis short Guide will cover both of these methods, so no matter how you write Python, you'll leave knowing how to build Gradio apps faster.\n\n## Python IDE Reload \ud83d\udd25\n\nIf you are building Gradio Blocks using a Python IDE, your file of code (let's name it `run.py`) might look something like this: \n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n gr.Markdown(\"# Greetings from Gradio!\")\n inp = gr.Textbox(placeholder=\"What is your name?\")\n out = gr.Textbox()\n\n inp.change(fn=lambda x: f\"Welcome, {x}!\", \n inputs=inp, \n outputs=out)\n\nif __name__ == \"__main__\":\n demo.launch() \n```\n\nThe problem is that anytime that you want to make a change to your layout, events, or components, you have to close and rerun your app by writing `python run.py`.\n\nInstead of doing this, you can run your code in **reload mode** by changing 1 word: `python` to `gradio`:\n\nIn the terminal, run `gradio run.py`. That's it! \n\nNow, you'll see that after you'll see something like this:\n\n```bash\nLaunching in *reload mode* on: http://127.0.0.1:7860 (Press CTRL+C to quit)\n\nWatching...\n\nWARNING: The --reload flag should not be used in production on Windows.\n```\n\nThe important part here is the line that says `Watching...` What's happening here is that Gradio will be observing the directory where `run.py` file lives, and if the file changes, it will automatically rerun the file for you. So you can focus on writing your code, and your Gradio demo will refresh automatically \ud83e\udd73\n\n\u26a0\ufe0f Warning: the `gradio` command does not detect the parameters passed to the `launch()` methods because the `launch()` method is never called in reload mode. For example, setting `auth`, or `show_error` in `launch()` will not be reflected in the app.\n\nThere is one important thing to keep in mind when using the reload mode: Gradio specifically looks for a Gradio Blocks/Interface demo called `demo` in your code. If you have named your demo something else, you will need to pass in the name of your demo's FastAPI app as the 2nd parameter in your code. For Gradio demos, the FastAPI app can be accessed using the `.app` attribute. 
So if your `run.py` file looked like this:\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as my_demo:\n gr.Markdown(\"# Greetings from Gradio!\")\n inp = gr.Textbox(placeholder=\"What is your name?\")\n out = gr.Textbox()\n\n inp.change(fn=lambda x: f\"Welcome, {x}!\", \n inputs=inp, \n outputs=out)\n\nif __name__ == \"__main__\":\n my_demo.launch() \n```\n\nThen you would launch it in reload mode like this: `gradio run.py my_demo.app`. \n\n\ud83d\udd25 If your application accepts command line arguments, you can pass them in as well. Here's an example:\n\n```python\nimport gradio as gr\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--name\", type=str, default=\"User\")\nargs, unknown = parser.parse_known_args()\n\nwith gr.Blocks() as demo:\n gr.Markdown(f\"# Greetings {args.name}!\")\n inp = gr.Textbox()\n out = gr.Textbox()\n\n inp.change(fn=lambda x: x, inputs=inp, outputs=out)\n\nif __name__ == \"__main__\":\n demo.launch()\n```\n\nWhich you could run like this: `gradio run.py --name Gretel`\n\nAs a small aside, this auto-reloading happens if you change your `run.py` source code or the Gradio source code. Meaning that this can be useful if you decide to [contribute to Gradio itself](https://github.com/gradio-app/gradio/blob/main/CONTRIBUTING.md) \u2705\n\n## Jupyter Notebook Magic \ud83d\udd2e\n\nWhat about if you use Jupyter Notebooks (or Colab Notebooks, etc.) to develop code? We got something for you too!\n\nWe've developed a **magic command** that will create and run a Blocks demo for you. To use this, load the gradio extension at the top of your notebook: \n\n`%load_ext gradio`\n\nThen, in the cell that you are developing your Gradio demo, simply write the magic command **`%%blocks`** at the top, and then write the layout and components like you would normally:\n\n```py\n%%blocks \n\nimport gradio as gr\n\ngr.Markdown(\"# Greetings from Gradio!\")\ninp = gr.Textbox(placeholder=\"What is your name?\")\nout = gr.Textbox()\n\ninp.change(fn=lambda x: f\"Welcome, {x}!\", \n inputs=inp, \n outputs=out)\n```\n\nNotice that:\n\n* You do not need to put the boiler plate `with gr.Blocks() as demo:` and `demo.launch()` code \u2014 Gradio does that for you automatically!\n\n* Every time you rerun the cell, Gradio will re-launch your app on the same port and using the same underlying web server. This means you'll see your changes *much, much faster* than if you were rerunning the cell normally. \n\nHere's what it looks like in a jupyter notebook:\n\n![](https://i.ibb.co/nrszFws/Blocks.gif)\n\n\ud83e\ude84 This works in colab notebooks too! [Here's a colab notebook](https://colab.research.google.com/drive/1jUlX1w7JqckRHVE-nbDyMPyZ7fYD8488?authuser=1#scrollTo=zxHYjbCTTz_5) where you can see the Blocks magic in action. Try making some changes and re-running the cell with the Gradio code! \n\nThe Notebook Magic is now the author's preferred way of building Gradio demos. Regardless of how you write Python code, we hope either of these methods will give you a much better development experience using Gradio. \n\n--------\n\n## Next Steps\n\nNow that you know how to develop quickly using Gradio, start building your own! \n\nIf you are looking for inspiration, try exploring demos other people have built with Gradio, [browse public Hugging Face Spaces](http://hf.space/) \ud83e\udd17\n\n", "html": "

Developing Faster with Auto-Reloading

\n\n

Prerequisite: This Guide requires you to know about Blocks. Make sure to read the Guide to Blocks first.

\n\n

This guide covers auto reloading, reloading in a Python IDE, and using gradio with Jupyter Notebooks.

\n\n

Why Auto-Reloading?

\n\n

When you are building a Gradio demo, particularly out of Blocks, you may find it cumbersome to keep re-running your code to test your changes.

\n\n

To make it faster and more convenient to write your code, we've made it easier to \"reload\" your Gradio apps instantly when you are developing in a Python IDE (like VS Code, Sublime Text, PyCharm, and so on) or generally running your Python code from the terminal. We've also developed an analogous \"magic command\" that allows you to re-run cells faster if you use Jupyter Notebooks (or any similar environment like Colab).

\n\n

This short Guide will cover both of these methods, so no matter how you write Python, you'll leave knowing how to build Gradio apps faster.

\n\n

Python IDE Reload \ud83d\udd25

\n\n

If you are building Gradio Blocks using a Python IDE, your file of code (let's name it run.py) might look something like this:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    gr.Markdown(\"# Greetings from Gradio!\")\n    inp = gr.Textbox(placeholder=\"What is your name?\")\n    out = gr.Textbox()\n\n    inp.change(fn=lambda x: f\"Welcome, {x}!\", \n               inputs=inp, \n               outputs=out)\n\nif __name__ == \"__main__\":\n    demo.launch()    \n
\n\n

The problem is that any time you want to make a change to your layout, events, or components, you have to close and rerun your app by writing python run.py.

\n\n

Instead of doing this, you can run your code in reload mode by changing 1 word: python to gradio:

\n\n

In the terminal, run gradio run.py. That's it!

\n\n

Now, after running this command, you'll see something like this:

\n\n
Launching in *reload mode* on: http://127.0.0.1:7860 (Press CTRL+C to quit)\n\nWatching...\n\nWARNING:  The --reload flag should not be used in production on Windows.\n
\n\n

The important part here is the line that says Watching... What's happening here is that Gradio will be observing the directory where the run.py file lives, and if the file changes, it will automatically rerun the file for you. So you can focus on writing your code, and your Gradio demo will refresh automatically \ud83e\udd73

\n\n

\u26a0\ufe0f Warning: the gradio command does not detect the parameters passed to the launch() method because the launch() method is never called in reload mode. For example, setting auth or show_error in launch() will not be reflected in the app.

\n\n

There is one important thing to keep in mind when using the reload mode: Gradio specifically looks for a Gradio Blocks/Interface demo called demo in your code. If you have named your demo something else, you will need to pass in the name of your demo's FastAPI app as the 2nd parameter in your code. For Gradio demos, the FastAPI app can be accessed using the .app attribute. So if your run.py file looked like this:

\n\n
import gradio as gr\n\nwith gr.Blocks() as my_demo:\n    gr.Markdown(\"# Greetings from Gradio!\")\n    inp = gr.Textbox(placeholder=\"What is your name?\")\n    out = gr.Textbox()\n\n    inp.change(fn=lambda x: f\"Welcome, {x}!\", \n               inputs=inp, \n               outputs=out)\n\nif __name__ == \"__main__\":\n    my_demo.launch()    \n
\n\n

Then you would launch it in reload mode like this: gradio run.py my_demo.app.

\n\n

\ud83d\udd25 If your application accepts command line arguments, you can pass them in as well. Here's an example:

\n\n
import gradio as gr\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--name\", type=str, default=\"User\")\nargs, unknown = parser.parse_known_args()\n\nwith gr.Blocks() as demo:\n    gr.Markdown(f\"# Greetings {args.name}!\")\n    inp = gr.Textbox()\n    out = gr.Textbox()\n\n    inp.change(fn=lambda x: x, inputs=inp, outputs=out)\n\nif __name__ == \"__main__\":\n    demo.launch()\n
\n\n

Which you could run like this: gradio run.py --name Gretel

\n\n

As a small aside, this auto-reloading happens if you change your run.py source code or the Gradio source code, meaning that it can be useful if you decide to contribute to Gradio itself \u2705

\n\n

Jupyter Notebook Magic \ud83d\udd2e

\n\n

What if you use Jupyter Notebooks (or Colab Notebooks, etc.) to develop code? We've got something for you too!

\n\n

We've developed a magic command that will create and run a Blocks demo for you. To use this, load the gradio extension at the top of your notebook:

\n\n

%load_ext gradio

\n\n

Then, in the cell that you are developing your Gradio demo, simply write the magic command %%blocks at the top, and then write the layout and components like you would normally:

\n\n
%%blocks \n\nimport gradio as gr\n\ngr.Markdown(\"# Greetings from Gradio!\")\ninp = gr.Textbox(placeholder=\"What is your name?\")\nout = gr.Textbox()\n\ninp.change(fn=lambda x: f\"Welcome, {x}!\", \n           inputs=inp, \n           outputs=out)\n
\n\n

Notice that:

\n\n
  • You do not need to put the boilerplate with gr.Blocks() as demo: and demo.launch() code \u2014 Gradio does that for you automatically!
  • Every time you rerun the cell, Gradio will re-launch your app on the same port and using the same underlying web server. This means you'll see your changes much, much faster than if you were rerunning the cell normally.
\n\n

Here's what it looks like in a Jupyter notebook:

\n\n

\"\"

\n\n

\ud83e\ude84 This works in colab notebooks too! Here's a colab notebook where you can see the Blocks magic in action. Try making some changes and re-running the cell with the Gradio code!

\n\n

The Notebook Magic is now the author's preferred way of building Gradio demos. Regardless of how you write Python code, we hope either of these methods will give you a much better development experience using Gradio.

\n\n
\n\n

Next Steps

\n\n

Now that you know how to develop quickly using Gradio, start building your own!

\n\n

If you are looking for inspiration, try exploring demos other people have built with Gradio by browsing public Hugging Face Spaces \ud83e\udd17

\n", "tags": [], "spaces": [], "url": "/guides/developing-faster-with-reload-mode/", "contributor": null}, {"name": "how-to-use-3D-model-component", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 38, "pretty_name": "How To Use 3D Model Component", "content": "# How to Use the 3D Model Component\n\n\n\n\n## Introduction\n\n3D models are becoming more popular in machine learning and make for some of the most fun demos to experiment with. Using `gradio`, you can easily build a demo of your 3D image model and share it with anyone. The Gradio 3D Model component accepts 3 file types including: *.obj*, *.glb*, & *.gltf*.\n\nThis guide will show you how to build a demo for your 3D image model in a few lines of code; like the one below. Play around with 3D object by clicking around, dragging and zooming:\n\n \n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](https://gradio.app/guides/quickstart).\n\n\n## Taking a Look at the Code\n\nLet's take a look at how to create the minimal interface above. The prediction function in this case will just return the original 3D model mesh, but you can change this function to run inference on your machine learning model. We'll take a look at more complex examples below.\n\n```python\nimport gradio as gr\n\ndef load_mesh(mesh_file_name):\n return mesh_file_name\n\ndemo = gr.Interface(\n fn=load_mesh,\n inputs=gr.Model3D(),\n outputs=gr.Model3D(clear_color=[0.0, 0.0, 0.0, 0.0], label=\"3D Model\"),\n examples=[\n [\"files/Bunny.obj\"],\n [\"files/Duck.glb\"],\n [\"files/Fox.gltf\"],\n [\"files/face.obj\"],\n ],\n cache_examples=True,\n)\n\ndemo.launch()\n```\n\nLet's break down the code above:\n\n`load_mesh`: This is our 'prediction' function and for simplicity, this function will take in the 3D model mesh and return it.\n\nCreating the Interface:\n\n* `fn`: the prediction function that is used when the user clicks submit. In our case this is the `load_mesh` function.\n* `inputs`: create a model3D input component. The input expects an uploaded file as a {str} filepath.\n* `outputs`: create a model3D output component. The output component also expects a file as a {str} filepath.\n * `clear_color`: this is the background color of the 3D model canvas. Expects RGBa values.\n * `label`: the label that appears on the top left of the component.\n* `examples`: list of 3D model files. The 3D model component can accept *.obj*, *.glb*, & *.gltf* file types.\n* `cache_examples`: saves the predicted output for the examples, to save time on inference.\n\n\n## Exploring mode complex Model3D Demos:\n\nBelow is a demo that uses the DPT model to predict the depth of an image and then uses 3D Point Cloud to create a 3D object. Take a look at the [app.py](https://huggingface.co/spaces/radames/dpt-depth-estimation-3d-obj/blob/main/app.py) file for a peek into the code and the model prediction function.\n \n\nBelow is a demo that uses the PIFu model to convert an image of a clothed human into a 3D digitized model. Take a look at the [spaces.py](https://huggingface.co/spaces/radames/PIFu-Clothed-Human-Digitization/blob/main/PIFu/spaces.py) file for a peek into the code and the model prediction function.\n\n \n\n----------\n\nAnd you're done! That's all the code you need to build an interface for your Model3D model. 
Here are some references that you may find useful:\n\n* Gradio's [\"Getting Started\" guide](https://gradio.app/getting_started/)\n* The first [3D Model Demo](https://huggingface.co/spaces/dawood/Model3D) and [complete code](https://huggingface.co/spaces/dawood/Model3D/tree/main) (on Hugging Face Spaces)\n", "html": "

How to Use the 3D Model Component

\n\n

Introduction

\n\n

3D models are becoming more popular in machine learning and make for some of the most fun demos to experiment with. Using gradio, you can easily build a demo of your 3D image model and share it with anyone. The Gradio 3D Model component accepts 3 file types: .obj, .glb, and .gltf.

\n\n

This guide will show you how to build a demo for your 3D image model in a few lines of code, like the one below. Play around with the 3D object by clicking, dragging, and zooming:

\n\n

\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed.

\n\n
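
If you do not have it yet, the package can typically be installed from PyPI (shown here as a generic command; adjust for your environment):

\n\n
pip install gradio\n
\n\n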

Taking a Look at the Code

\n\n

Let's take a look at how to create the minimal interface above. The prediction function in this case will just return the original 3D model mesh, but you can change this function to run inference on your machine learning model. We'll take a look at more complex examples below.

\n\n
import gradio as gr\n\ndef load_mesh(mesh_file_name):\n    return mesh_file_name\n\ndemo = gr.Interface(\n    fn=load_mesh,\n    inputs=gr.Model3D(),\n    outputs=gr.Model3D(clear_color=[0.0, 0.0, 0.0, 0.0],  label=\"3D Model\"),\n    examples=[\n        [\"files/Bunny.obj\"],\n        [\"files/Duck.glb\"],\n        [\"files/Fox.gltf\"],\n        [\"files/face.obj\"],\n    ],\n    cache_examples=True,\n)\n\ndemo.launch()\n
\n\n

Let's break down the code above:

\n\n

load_mesh: This is our 'prediction' function and for simplicity, this function will take in the 3D model mesh and return it.

\n\n

Creating the Interface:

\n\n
  • fn: the prediction function that is used when the user clicks submit. In our case this is the load_mesh function.
  • inputs: create a model3D input component. The input expects an uploaded file as a {str} filepath.
  • outputs: create a model3D output component. The output component also expects a file as a {str} filepath.
    • clear_color: this is the background color of the 3D model canvas. Expects RGBa values.
    • label: the label that appears on the top left of the component.
  • examples: list of 3D model files. The 3D model component can accept .obj, .glb, & .gltf file types.
  • cache_examples: saves the predicted output for the examples, to save time on inference.
\n\n

Exploring more complex Model3D Demos:

\n\n

Below is a demo that uses the DPT model to predict the depth of an image and then uses a 3D point cloud to create a 3D object. Take a look at the app.py file for a peek into the code and the model prediction function.\n

\n\n

Below is a demo that uses the PIFu model to convert an image of a clothed human into a 3D digitized model. Take a look at the spaces.py file for a peek into the code and the model prediction function.

\n\n

\n\n
\n\n

And you're done! That's all the code you need to build an interface for your Model3D model. Here are some references that you may find useful:

\n\n\n", "tags": ["VISION", "IMAGE"], "spaces": ["https://huggingface.co/spaces/dawood/Model3D", "https://huggingface.co/spaces/radames/PIFu-Clothed-Human-Digitization", "https://huggingface.co/spaces/radames/dpt-depth-estimation-3d-obj"], "url": "/guides/how-to-use-3D-model-component/", "contributor": null}, {"name": "named-entity-recognition", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 39, "pretty_name": "Named Entity Recognition", "content": "# Named-Entity Recognition \n\n\n\n\n## Introduction\n\nNamed-entity recognition (NER), also known as token classification or text tagging, is the task of taking a sentence and classifying every word (or \"token\") into different categories, such as names of people or names of locations, or different parts of speech. \n\nFor example, given the sentence:\n\n> Does Chicago have any Pakistani restaurants?\n\nA named-entity recognition algorithm may identify:\n\n* \"Chicago\" as a **location**\n* \"Pakistani\" as an **ethnicity** \n\n\nand so on. \n\nUsing `gradio` (specifically the `HighlightedText` component), you can easily build a web demo of your NER model and share that with the rest of your team.\n\nHere is an example of a demo that you'll be able to build:\n\n\n\nThis tutorial will show how to take a pretrained NER model and deploy it with a Gradio interface. We will show two different ways to use the `HighlightedText` component -- depending on your NER model, either of these two ways may be easier to learn! \n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). You will also need a pretrained named-entity recognition model. You can use your own, while in this tutorial, we will use one from the `transformers` library.\n\n### Approach 1: List of Entity Dictionaries\n\nMany named-entity recognition models output a list of dictionaries. Each dictionary consists of an *entity*, a \"start\" index, and an \"end\" index. This is, for example, how NER models in the `transformers` library operate:\n\n```py\nfrom transformers import pipeline \nner_pipeline = pipeline(\"ner\")\nner_pipeline(\"Does Chicago have any Pakistani restaurants\")\n```\n\nOutput:\n\n```bash\n[{'entity': 'I-LOC',\n 'score': 0.9988978,\n 'index': 2,\n 'word': 'Chicago',\n 'start': 5,\n 'end': 12},\n {'entity': 'I-MISC',\n 'score': 0.9958592,\n 'index': 5,\n 'word': 'Pakistani',\n 'start': 22,\n 'end': 31}]\n```\n\nIf you have such a model, it is very easy to hook it up to Gradio's `HighlightedText` component. All you need to do is pass in this **list of entities**, along with the **original text** to the model, together as dictionary, with the keys being `\"entities\"` and `\"text\"` respectively.\n\nHere is a complete example:\n\n```python\nfrom transformers import pipeline\n\nimport gradio as gr\n\nner_pipeline = pipeline(\"ner\")\n\nexamples = [\n \"Does Chicago have any stores and does Joe live here?\",\n]\n\ndef ner(text):\n output = ner_pipeline(text)\n return {\"text\": text, \"entities\": output} \n\ndemo = gr.Interface(ner,\n gr.Textbox(placeholder=\"Enter sentence here...\"), \n gr.HighlightedText(),\n examples=examples)\n\ndemo.launch()\n\n```\n\n\n### Approach 2: List of Tuples\n\nAn alternative way to pass data into the `HighlightedText` component is a list of tuples. The first element of each tuple should be the word or words that are being classified into a particular entity. 
The second element should be the entity label (or `None` if they should be unlabeled). The `HighlightedText` component automatically strings together the words and labels to display the entities.\n\nIn some cases, this can be easier than the first approach. Here is a demo showing this approach using Spacy's parts-of-speech tagger:\n\n```python\nimport gradio as gr\nimport os\nos.system('python -m spacy download en_core_web_sm')\nimport spacy\nfrom spacy import displacy\n\nnlp = spacy.load(\"en_core_web_sm\")\n\ndef text_analysis(text):\n doc = nlp(text)\n html = displacy.render(doc, style=\"dep\", page=True)\n html = (\n \"
\"\n + html\n + \"
\"\n )\n pos_count = {\n \"char_count\": len(text),\n \"token_count\": 0,\n }\n pos_tokens = []\n\n for token in doc:\n pos_tokens.extend([(token.text, token.pos_), (\" \", None)])\n\n return pos_tokens, pos_count, html\n\ndemo = gr.Interface(\n text_analysis,\n gr.Textbox(placeholder=\"Enter sentence here...\"),\n [\"highlight\", \"json\", \"html\"],\n examples=[\n [\"What a beautiful morning for a walk!\"],\n [\"It was the best of times, it was the worst of times.\"],\n ],\n)\n\ndemo.launch()\n\n```\n\n\n\n--------------------------------------------\n\n\nAnd you're done! That's all you need to know to build a web-based GUI for your NER model. \n\nFun tip: you can share your NER demo instantly with others simply by setting `share=True` in `launch()`. \n\n\n", "html": "

Named-Entity Recognition

\n\n

Introduction

\n\n

Named-entity recognition (NER), also known as token classification or text tagging, is the task of taking a sentence and classifying every word (or \"token\") into different categories, such as names of people or names of locations, or different parts of speech.

\n\n

For example, given the sentence:

\n\n
\n

Does Chicago have any Pakistani restaurants?

\n
\n\n

A named-entity recognition algorithm may identify:

\n\n
  • \"Chicago\" as a location
  • \"Pakistani\" as an ethnicity
\n\n

and so on.

\n\n

Using gradio (specifically the HighlightedText component), you can easily build a web demo of your NER model and share that with the rest of your team.

\n\n

Here is an example of a demo that you'll be able to build:

\n\n

\n\n

This tutorial will show how to take a pretrained NER model and deploy it with a Gradio interface. We will show two different ways to use the HighlightedText component -- depending on your NER model, either of these two ways may be easier to learn!

\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed. You will also need a pretrained named-entity recognition model. You can use your own, but in this tutorial we will use one from the transformers library.

\n\n

Approach 1: List of Entity Dictionaries

\n\n

Many named-entity recognition models output a list of dictionaries. Each dictionary consists of an entity, a \"start\" index, and an \"end\" index. This is, for example, how NER models in the transformers library operate:

\n\n
from transformers import pipeline \nner_pipeline = pipeline(\"ner\")\nner_pipeline(\"Does Chicago have any Pakistani restaurants\")\n
\n\n

Output:

\n\n
[{'entity': 'I-LOC',\n  'score': 0.9988978,\n  'index': 2,\n  'word': 'Chicago',\n  'start': 5,\n  'end': 12},\n {'entity': 'I-MISC',\n  'score': 0.9958592,\n  'index': 5,\n  'word': 'Pakistani',\n  'start': 22,\n  'end': 31}]\n
\n\n

If you have such a model, it is very easy to hook it up to Gradio's HighlightedText component. All you need to do is pass in this list of entities, along with the original text, together as a dictionary with the keys \"entities\" and \"text\" respectively.

\n\n

Here is a complete example:

\n\n
from transformers import pipeline\n\nimport gradio as gr\n\nner_pipeline = pipeline(\"ner\")\n\nexamples = [\n    \"Does Chicago have any stores and does Joe live here?\",\n]\n\ndef ner(text):\n    output = ner_pipeline(text)\n    return {\"text\": text, \"entities\": output}    \n\ndemo = gr.Interface(ner,\n             gr.Textbox(placeholder=\"Enter sentence here...\"), \n             gr.HighlightedText(),\n             examples=examples)\n\ndemo.launch()\n\n
\n\n

\n\n

Approach 2: List of Tuples

\n\n

An alternative way to pass data into the HighlightedText component is a list of tuples. The first element of each tuple should be the word or words that are being classified into a particular entity. The second element should be the entity label (or None if they should be unlabeled). The HighlightedText component automatically strings together the words and labels to display the entities.

\n\n
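
As a rough sketch of that format (the entity labels and hard-coded pairs below are made up for illustration; a real model would produce them), the value passed to HighlightedText is simply a list of (text, label) tuples:

\n\n
import gradio as gr\n\ndef ner(text):\n    # Hard-coded (word, label) pairs for illustration only; None means the span is left unlabeled\n    return [(\"Does \", None), (\"Chicago\", \"LOC\"), (\" have any \", None), (\"Pakistani\", \"MISC\"), (\" restaurants?\", None)]\n\ndemo = gr.Interface(ner,\n             gr.Textbox(placeholder=\"Enter sentence here...\"),\n             gr.HighlightedText())\n\ndemo.launch()\n
\n\n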

In some cases, this can be easier than the first approach. Here is a demo showing this approach using Spacy's parts-of-speech tagger:

\n\n
import gradio as gr\nimport os\nos.system('python -m spacy download en_core_web_sm')\nimport spacy\nfrom spacy import displacy\n\nnlp = spacy.load(\"en_core_web_sm\")\n\ndef text_analysis(text):\n    doc = nlp(text)\n    html = displacy.render(doc, style=\"dep\", page=True)\n    html = (\n        \"
\"\n + html\n + \"
\"\n )\n pos_count = {\n \"char_count\": len(text),\n \"token_count\": 0,\n }\n pos_tokens = []\n\n for token in doc:\n pos_tokens.extend([(token.text, token.pos_), (\" \", None)])\n\n return pos_tokens, pos_count, html\n\ndemo = gr.Interface(\n text_analysis,\n gr.Textbox(placeholder=\"Enter sentence here...\"),\n [\"highlight\", \"json\", \"html\"],\n examples=[\n [\"What a beautiful morning for a walk!\"],\n [\"It was the best of times, it was the worst of times.\"],\n ],\n)\n\ndemo.launch()\n\n
\n\n

\n\n
\n\n

And you're done! That's all you need to know to build a web-based GUI for your NER model.

\n\n

Fun tip: you can share your NER demo instantly with others simply by setting share=True in launch().

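\n\n

For example, the final line of the demos above would simply become (a one-line sketch):

\n\n
demo.launch(share=True)  # generates a temporary public URL you can send to others\n
\n\n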
\n", "tags": ["NER", "TEXT", "HIGHLIGHT"], "spaces": ["https://huggingface.co/spaces/rajistics/biobert_ner_demo", "https://huggingface.co/spaces/abidlabs/ner", "https://huggingface.co/spaces/rajistics/Financial_Analyst_AI"], "url": "/guides/named-entity-recognition/", "contributor": null}, {"name": "real-time-speech-recognition", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 40, "pretty_name": "Real Time Speech Recognition", "content": "# Real Time Speech Recognition \n\n\n\n\n## Introduction\n\nAutomatic speech recognition (ASR), the conversion of spoken speech to text, is a very important and thriving area of machine learning. ASR algorithms run on practically every smartphone, and are becoming increasingly embedded in professional workflows, such as digital assistants for nurses and doctors. Because ASR algorithms are designed to be used directly by customers and end users, it is important to validate that they are behaving as expected when confronted with a wide variety of speech patterns (different accents, pitches, and background audio conditions).\n\nUsing `gradio`, you can easily build a demo of your ASR model and share that with a testing team, or test it yourself by speaking through the microphone on your device.\n\nThis tutorial will show how to take a pretrained speech-to-text model and deploy it with a Gradio interface. We will start with a ***full-context*** model, in which the user speaks the entire audio before the prediction runs. Then we will adapt the demo to make it ***streaming***, meaning that the audio model will convert speech as you speak. The streaming demo that we create will look something like this (try it below or [in a new tab](https://huggingface.co/spaces/abidlabs/streaming-asr-paused)!):\n\n\n\nReal-time ASR is inherently *stateful*, meaning that the model's predictions change depending on what words the user previously spoke. So, in this tutorial, we will also cover how to use **state** with Gradio demos. \n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). You will also need a pretrained speech recognition model. In this tutorial, we will build demos from 2 ASR libraries:\n\n* Transformers (for this, `pip install transformers` and `pip install torch`) \n* DeepSpeech (`pip install deepspeech==0.8.2`)\n\nMake sure you have at least one of these installed so that you can follow along the tutorial. You will also need `ffmpeg` [installed on your system](https://www.ffmpeg.org/download.html), if you do not already have it, to process files from the microphone.\n\nHere's how to build a real time speech recognition (ASR) app: \n\n1. [Set up the Transformers ASR Model](#1-set-up-the-transformers-asr-model)\n2. [Create a Full-Context ASR Demo with Transformers](#2-create-a-full-context-asr-demo-with-transformers) \n3. [Create a Streaming ASR Demo with Transformers](#3-create-a-streaming-asr-demo-with-transformers)\n4. [Create a Streaming ASR Demo with DeepSpeech](#4-create-a-streaming-asr-demo-with-deep-speech)\n\n\n## 1. Set up the Transformers ASR Model\n\nFirst, you will need to have an ASR model that you have either trained yourself or you will need to download a pretrained model. In this tutorial, we will start by using a pretrained ASR model from the Hugging Face model, `Wav2Vec2`. 
\n\nHere is the code to load `Wav2Vec2` from Hugging Face `transformers`.\n\n```python\nfrom transformers import pipeline\n\np = pipeline(\"automatic-speech-recognition\")\n```\n\nThat's it! By default, the automatic speech recognition model pipeline loads Facebook's `facebook/wav2vec2-base-960h` model.\n\n## 2. Create a Full-Context ASR Demo with Transformers \n\nWe will start by creating a *full-context* ASR demo, in which the user speaks the full audio before using the ASR model to run inference. This is very easy with Gradio -- we simply create a function around the `pipeline` object above.\n\nWe will use `gradio`'s built in `Audio` component, configured to take input from the user's microphone and return a filepath for the recorded audio. The output component will be a plain `Textbox`.\n\n```python\nimport gradio as gr\n\ndef transcribe(audio):\n text = p(audio)[\"text\"]\n return text\n\ngr.Interface(\n fn=transcribe, \n inputs=gr.Audio(source=\"microphone\", type=\"filepath\"), \n outputs=\"text\").launch()\n```\n\nSo what's happening here? The `transcribe` function takes a single parameter, `audio`, which is a filepath to the audio file that the user has recorded. The `pipeline` object expects a filepath and converts it to text, which is returned to the frontend and displayed in a textbox. \n\nLet's see it in action! (Record a short audio clip and then click submit, or [open in a new tab](https://huggingface.co/spaces/abidlabs/full-context-asr)):\n\n\n\n## 3. Create a Streaming ASR Demo with Transformers\n\nOk great! We've built an ASR model that works well for short audio clips. However, if you are recording longer audio clips, you probably want a *streaming* interface, one that transcribes audio as the user speaks instead of just all-at-once at the end.\n\nThe good news is that it's not too difficult to adapt the demo we just made to make it streaming, using the same `Wav2Vec2` model. \n\nThe biggest change is that we must now introduce a `state` parameter, which holds the audio that has been *transcribed so far*. This allows us to only the latest chunk of audio and simply append it to the audio we previously transcribed. \n\nWhen adding state to a Gradio demo, you need to do a total of 3 things:\n\n* Add a `state` parameter to the function\n* Return the updated `state` at the end of the function\n* Add the `\"state\"` components to the `inputs` and `outputs` in `Interface` \n\nHere's what the code looks like:\n\n```python\ndef transcribe(audio, state=\"\"):\n text = p(audio)[\"text\"]\n state += text + \" \"\n return state, state\n\n# Set the starting state to an empty string\n\ngr.Interface(\n fn=transcribe, \n inputs=[\n gr.Audio(source=\"microphone\", type=\"filepath\", streaming=True), \n \"state\" \n ],\n outputs=[\n \"textbox\",\n \"state\"\n ],\n live=True).launch()\n```\n\nNotice that we've also made one other change, which is that we've set `live=True`. This keeps the Gradio interface running constantly, so it automatically transcribes audio without the user having to repeatedly hit the submit button.\n\nLet's see how it does (try below or [in a new tab](https://huggingface.co/spaces/abidlabs/streaming-asr))!\n\n\n\n\nOne thing that you may notice is that the transcription quality has dropped since the chunks of audio are so small, they lack the context to properly be transcribed. A \"hacky\" fix to this is to simply increase the runtime of the `transcribe()` function so that longer audio chunks are processed. 
We can do this by adding a `time.sleep()` inside the function, as shown below (we'll see a proper fix next) \n\n```python\nfrom transformers import pipeline\nimport gradio as gr\nimport time\n\np = pipeline(\"automatic-speech-recognition\")\n\ndef transcribe(audio, state=\"\"):\n time.sleep(2)\n text = p(audio)[\"text\"]\n state += text + \" \"\n return state, state\n\ngr.Interface(\n fn=transcribe, \n inputs=[\n gr.Audio(source=\"microphone\", type=\"filepath\", streaming=True), \n \"state\"\n ],\n outputs=[\n \"textbox\",\n \"state\"\n ],\n live=True).launch()\n```\n\nTry the demo below to see the difference (or [open in a new tab](https://huggingface.co/spaces/abidlabs/streaming-asr-paused))!\n\n\n\n\n## 4. Create a Streaming ASR Demo with DeepSpeech\n\nYou're not restricted to ASR models from the `transformers` library -- you can use your own models or models from other libraries. The `DeepSpeech` library contains models that are specifically designed to handle streaming audio data. These models perform really well with streaming data as they are able to account for previous chunks of audio data when making predictions.\n\nGoing through the DeepSpeech library is beyond the scope of this Guide (check out their [excellent documentation here](https://deepspeech.readthedocs.io/en/r0.9/)), but you can use Gradio very similarly with a DeepSpeech ASR model as with a Transformers ASR model. \n\nHere's a complete example (on Linux):\n\nFirst install the DeepSpeech library and download the pretrained models from the terminal:\n\n```bash\nwget https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-models.pbmm\nwget https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-models.scorer\napt install libasound2-dev portaudio19-dev libportaudio2 libportaudiocpp0 ffmpeg\npip install deepspeech==0.8.2\n```\n\nThen, create a similar `transcribe()` function as before:\n\n```python\nfrom deepspeech import Model\nimport numpy as np\n\nmodel_file_path = \"deepspeech-0.8.2-models.pbmm\"\nlm_file_path = \"deepspeech-0.8.2-models.scorer\"\nbeam_width = 100\nlm_alpha = 0.93\nlm_beta = 1.18\n\nmodel = Model(model_file_path)\nmodel.enableExternalScorer(lm_file_path)\nmodel.setScorerAlphaBeta(lm_alpha, lm_beta)\nmodel.setBeamWidth(beam_width)\n\n\ndef reformat_freq(sr, y):\n if sr not in (\n 48000,\n 16000,\n ): # Deepspeech only supports 16k, (we convert 48k -> 16k)\n raise ValueError(\"Unsupported rate\", sr)\n if sr == 48000:\n y = (\n ((y / max(np.max(y), 1)) * 32767)\n .reshape((-1, 3))\n .mean(axis=1)\n .astype(\"int16\")\n )\n sr = 16000\n return sr, y\n\n\ndef transcribe(speech, stream):\n _, y = reformat_freq(*speech)\n if stream is None:\n stream = model.createStream()\n stream.feedAudioContent(y)\n text = stream.intermediateDecode()\n return text, stream\n\n```\n\nThen, create a Gradio Interface as before (the only difference being that the return type should be `numpy` instead of a `filepath` to be compatible with the DeepSpeech models)\n\n```python\nimport gradio as gr\n\ngr.Interface(\n fn=transcribe, \n inputs=[\n gr.Audio(source=\"microphone\", type=\"numpy\"), \n \"state\" \n ], \n outputs= [\n \"text\", \n \"state\"\n ], \n live=True).launch()\n```\n\nRunning all of this should allow you to deploy your realtime ASR model with a nice GUI. Try it out and see how well it works for you.\n\n--------------------------------------------\n\n\nAnd you're done! That's all the code you need to build a web-based GUI for your ASR model. 
\n\nFun tip: you can share your ASR model instantly with others simply by setting `share=True` in `launch()`. \n\n\n", "html": "

Real Time Speech Recognition

\n\n

Introduction

\n\n

Automatic speech recognition (ASR), the conversion of speech to text, is a very important and thriving area of machine learning. ASR algorithms run on practically every smartphone, and are becoming increasingly embedded in professional workflows, such as digital assistants for nurses and doctors. Because ASR algorithms are designed to be used directly by customers and end users, it is important to validate that they are behaving as expected when confronted with a wide variety of speech patterns (different accents, pitches, and background audio conditions).

\n\n

Using gradio, you can easily build a demo of your ASR model and share that with a testing team, or test it yourself by speaking through the microphone on your device.

\n\n

This tutorial will show how to take a pretrained speech-to-text model and deploy it with a Gradio interface. We will start with a full-context model, in which the user speaks the entire audio before the prediction runs. Then we will adapt the demo to make it streaming, meaning that the audio model will convert speech as you speak. The streaming demo that we create will look something like this (try it below or in a new tab!):

\n\n\n\n

Real-time ASR is inherently stateful, meaning that the model's predictions change depending on what words the user previously spoke. So, in this tutorial, we will also cover how to use state with Gradio demos.

\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed. You will also need a pretrained speech recognition model. In this tutorial, we will build demos from 2 ASR libraries:

\n\n
  • Transformers (for this, pip install transformers and pip install torch)
  • DeepSpeech (pip install deepspeech==0.8.2)
\n\n

Make sure you have at least one of these installed so that you can follow along the tutorial. You will also need ffmpeg installed on your system, if you do not already have it, to process files from the microphone.

\n\n

Here's how to build a real time speech recognition (ASR) app:

\n\n
  1. Set up the Transformers ASR Model
  2. Create a Full-Context ASR Demo with Transformers
  3. Create a Streaming ASR Demo with Transformers
  4. Create a Streaming ASR Demo with DeepSpeech
\n\n

1. Set up the Transformers ASR Model

\n\n

First, you will need to have an ASR model that you have either trained yourself or downloaded as a pretrained model. In this tutorial, we will start by using a pretrained ASR model from Hugging Face, Wav2Vec2.

\n\n

Here is the code to load Wav2Vec2 from Hugging Face transformers.

\n\n
from transformers import pipeline\n\np = pipeline(\"automatic-speech-recognition\")\n
\n\n

That's it! By default, the automatic speech recognition model pipeline loads Facebook's facebook/wav2vec2-base-960h model.

\n\n
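
If you want to pin or swap the checkpoint explicitly, the pipeline also accepts a model argument; a minimal sketch using the same default checkpoint:

\n\n
from transformers import pipeline\n\n# Explicitly naming the checkpoint that the pipeline would otherwise load by default\np = pipeline(\"automatic-speech-recognition\", model=\"facebook/wav2vec2-base-960h\")\n
\n\n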

2. Create a Full-Context ASR Demo with Transformers

\n\n

We will start by creating a full-context ASR demo, in which the user speaks the full audio before using the ASR model to run inference. This is very easy with Gradio -- we simply create a function around the pipeline object above.

\n\n

We will use gradio's built in Audio component, configured to take input from the user's microphone and return a filepath for the recorded audio. The output component will be a plain Textbox.

\n\n
import gradio as gr\n\ndef transcribe(audio):\n    text = p(audio)[\"text\"]\n    return text\n\ngr.Interface(\n    fn=transcribe, \n    inputs=gr.Audio(source=\"microphone\", type=\"filepath\"), \n    outputs=\"text\").launch()\n
\n\n

So what's happening here? The transcribe function takes a single parameter, audio, which is a filepath to the audio file that the user has recorded. The pipeline object expects a filepath and converts it to text, which is returned to the frontend and displayed in a textbox.

\n\n

Let's see it in action! (Record a short audio clip and then click submit, or open in a new tab):

\n\n\n\n

3. Create a Streaming ASR Demo with Transformers

\n\n

Ok great! We've built an ASR model that works well for short audio clips. However, if you are recording longer audio clips, you probably want a streaming interface, one that transcribes audio as the user speaks instead of just all-at-once at the end.

\n\n

The good news is that it's not too difficult to adapt the demo we just made to make it streaming, using the same Wav2Vec2 model.

\n\n

The biggest change is that we must now introduce a state parameter, which holds the text that has been transcribed so far. This allows us to only process the latest chunk of audio and simply append it to what we have previously transcribed.

\n\n

When adding state to a Gradio demo, you need to do a total of 3 things:

\n\n
  • Add a state parameter to the function
  • Return the updated state at the end of the function
  • Add the \"state\" components to the inputs and outputs in Interface
\n\n

Here's what the code looks like:

\n\n
def transcribe(audio, state=\"\"):\n    text = p(audio)[\"text\"]\n    state += text + \" \"\n    return state, state\n\n# Set the starting state to an empty string\n\ngr.Interface(\n    fn=transcribe, \n    inputs=[\n        gr.Audio(source=\"microphone\", type=\"filepath\", streaming=True), \n        \"state\" \n    ],\n    outputs=[\n        \"textbox\",\n        \"state\"\n    ],\n    live=True).launch()\n
\n\n

Notice that we've also made one other change, which is that we've set live=True. This keeps the Gradio interface running constantly, so it automatically transcribes audio without the user having to repeatedly hit the submit button.

\n\n

Let's see how it does (try below or in a new tab)!

\n\n\n\n

One thing that you may notice is that the transcription quality has dropped: since the chunks of audio are so small, they lack the context needed to be transcribed properly. A \"hacky\" fix for this is to simply increase the running time of the transcribe() function so that longer audio chunks are processed. We can do this by adding a time.sleep() inside the function, as shown below (we'll see a proper fix next).

\n\n
from transformers import pipeline\nimport gradio as gr\nimport time\n\np = pipeline(\"automatic-speech-recognition\")\n\ndef transcribe(audio, state=\"\"):\n    time.sleep(2)\n    text = p(audio)[\"text\"]\n    state += text + \" \"\n    return state, state\n\ngr.Interface(\n    fn=transcribe, \n    inputs=[\n        gr.Audio(source=\"microphone\", type=\"filepath\", streaming=True), \n        \"state\"\n    ],\n    outputs=[\n        \"textbox\",\n        \"state\"\n    ],\n    live=True).launch()\n
\n\n

Try the demo below to see the difference (or open in a new tab)!

\n\n\n\n

4. Create a Streaming ASR Demo with DeepSpeech

\n\n

You're not restricted to ASR models from the transformers library -- you can use your own models or models from other libraries. The DeepSpeech library contains models that are specifically designed to handle streaming audio data. These models perform really well with streaming data as they are able to account for previous chunks of audio data when making predictions.

\n\n

Going through the DeepSpeech library is beyond the scope of this Guide (check out their excellent documentation here), but you can use Gradio very similarly with a DeepSpeech ASR model as with a Transformers ASR model.

\n\n

Here's a complete example (on Linux):

\n\n

First install the DeepSpeech library and download the pretrained models from the terminal:

\n\n
wget https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-models.pbmm\nwget https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-models.scorer\napt install libasound2-dev portaudio19-dev libportaudio2 libportaudiocpp0 ffmpeg\npip install deepspeech==0.8.2\n
\n\n

Then, create a similar transcribe() function as before:

\n\n
from deepspeech import Model\nimport numpy as np\n\nmodel_file_path = \"deepspeech-0.8.2-models.pbmm\"\nlm_file_path = \"deepspeech-0.8.2-models.scorer\"\nbeam_width = 100\nlm_alpha = 0.93\nlm_beta = 1.18\n\nmodel = Model(model_file_path)\nmodel.enableExternalScorer(lm_file_path)\nmodel.setScorerAlphaBeta(lm_alpha, lm_beta)\nmodel.setBeamWidth(beam_width)\n\n\ndef reformat_freq(sr, y):\n    if sr not in (\n        48000,\n        16000,\n    ):  # Deepspeech only supports 16k, (we convert 48k -> 16k)\n        raise ValueError(\"Unsupported rate\", sr)\n    if sr == 48000:\n        y = (\n            ((y / max(np.max(y), 1)) * 32767)\n            .reshape((-1, 3))\n            .mean(axis=1)\n            .astype(\"int16\")\n        )\n        sr = 16000\n    return sr, y\n\n\ndef transcribe(speech, stream):\n    _, y = reformat_freq(*speech)\n    if stream is None:\n        stream = model.createStream()\n    stream.feedAudioContent(y)\n    text = stream.intermediateDecode()\n    return text, stream\n\n
\n\n

Then, create a Gradio Interface as before (the only difference being that the return type should be numpy instead of a filepath, to be compatible with the DeepSpeech models).

\n\n
import gradio as gr\n\ngr.Interface(\n    fn=transcribe, \n    inputs=[\n        gr.Audio(source=\"microphone\", type=\"numpy\"), \n        \"state\" \n    ], \n    outputs= [\n        \"text\", \n        \"state\"\n    ], \n    live=True).launch()\n
\n\n

Running all of this should allow you to deploy your realtime ASR model with a nice GUI. Try it out and see how well it works for you.

\n\n
\n\n

And you're done! That's all the code you need to build a web-based GUI for your ASR model.

\n\n

Fun tip: you can share your ASR model instantly with others simply by setting share=True in launch().

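\n\n

For example, a minimal sketch that turns on a public share link for the full-context demo from earlier (nothing else changes):

\n\n
gr.Interface(\n    fn=transcribe, \n    inputs=gr.Audio(source=\"microphone\", type=\"filepath\"), \n    outputs=\"text\").launch(share=True)\n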
\n", "tags": ["ASR", "SPEECH", "STREAMING"], "spaces": ["https://huggingface.co/spaces/abidlabs/streaming-asr-paused", "https://huggingface.co/spaces/abidlabs/full-context-asr"], "url": "/guides/real-time-speech-recognition/", "contributor": null}, {"name": "running-background-tasks", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 41, "pretty_name": "Running Background Tasks", "content": "# Running Background Tasks \n\n\n\n\n## Introduction\n\nThis guide explains how you can run background tasks from your gradio app.\nBackground tasks are operations that you'd like to perform outside the request-response\nlifecycle of your app either once or on a periodic schedule.\nExamples of background tasks include periodically synchronizing data to an external database or \nsending a report of model predictions via email.\n\n## Overview \n \nWe will be creating a simple \"Google-forms-style\" application to gather feedback from users of the gradio library.\nWe will use a local sqlite database to store our data, but we will periodically synchronize the state of the database\nwith a [HuggingFace Dataset](https://huggingface.co/datasets) so that our user reviews are always backed up.\nThe synchronization will happen in a background task running every 60 seconds.\n\nAt the end of the demo, you'll have a fully working application like this one:\n\n \n\n\n## Step 1 - Write your database logic \ud83d\udcbe\nOur application will store the name of the reviewer, their rating of gradio on a scale of 1 to 5, as well as\nany comments they want to share about the library. Let's write some code that creates a database table to\nstore this data. We'll also write some functions to insert a review into that table and fetch the latest 10 reviews.\n\nWe're going to use the `sqlite3` library to connect to our sqlite database but gradio will work with any library.\n\nThe code will look like this:\n\n```python\nDB_FILE = \"./reviews.db\"\ndb = sqlite3.connect(DB_FILE)\n\n# Create table if it doesn't already exist\ntry:\n db.execute(\"SELECT * FROM reviews\").fetchall()\n db.close()\nexcept sqlite3.OperationalError:\n db.execute(\n '''\n CREATE TABLE reviews (id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,\n name TEXT, review INTEGER, comments TEXT)\n ''')\n db.commit()\n db.close()\n\ndef get_latest_reviews(db: sqlite3.Connection):\n reviews = db.execute(\"SELECT * FROM reviews ORDER BY id DESC limit 10\").fetchall()\n total_reviews = db.execute(\"Select COUNT(id) from reviews\").fetchone()[0]\n reviews = pd.DataFrame(reviews, columns=[\"id\", \"date_created\", \"name\", \"review\", \"comments\"])\n return reviews, total_reviews\n\n\ndef add_review(name: str, review: int, comments: str):\n db = sqlite3.connect(DB_FILE)\n cursor = db.cursor()\n cursor.execute(\"INSERT INTO reviews(name, review, comments) VALUES(?,?,?)\", [name, review, comments])\n db.commit()\n reviews, total_reviews = get_latest_reviews(db)\n db.close()\n return reviews, total_reviews\n```\n\nLet's also write a function to load the latest reviews when the gradio application loads:\n```python\ndef load_data():\n db = sqlite3.connect(DB_FILE)\n reviews, total_reviews = get_latest_reviews(db)\n db.close()\n return reviews, total_reviews\n```\n\n## Step 2 - Create a gradio app \u26a1\nNow that we have our database logic defined, we can use gradio create a dynamic web page to ask our users for feedback! 
\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n name = gr.Textbox(label=\"Name\", placeholder=\"What is your name?\")\n review = gr.Radio(label=\"How satisfied are you with using gradio?\", choices=[1, 2, 3, 4, 5])\n comments = gr.Textbox(label=\"Comments\", lines=10, placeholder=\"Do you have any feedback on gradio?\")\n submit = gr.Button(value=\"Submit Feedback\")\n with gr.Column():\n data = gr.Dataframe(label=\"Most recently created 10 rows\")\n count = gr.Number(label=\"Total number of reviews\")\n submit.click(add_review, [name, review, comments], [data, count])\n demo.load(load_data, None, [data, count])\n```\n\n## Step 3 - Synchronize with HuggingFace Datasets \ud83e\udd17\n\nWe could call `demo.launch()` after step 2 and have a fully functioning application. However,\nour data would be stored locally on our machine. If the sqlite file were accidentally deleted, we'd lose all of our reviews!\nLet's back up our data to a dataset on the HuggingFace hub.\n\nCreate a dataset [here](https://huggingface.co/datasets) before proceeding.\n\nNow at the **top** of our script, we'll use the [huggingface hub client library](https://huggingface.co/docs/huggingface_hub/index)\nto connect to our dataset and pull the latest backup.\n\n```python\nTOKEN = os.environ.get('HUB_TOKEN')\nrepo = huggingface_hub.Repository(\n local_dir=\"data\",\n repo_type=\"dataset\",\n clone_from=\"\",\n use_auth_token=TOKEN\n)\nrepo.git_pull()\n\nshutil.copyfile(\"./data/reviews.db\", DB_FILE)\n```\n\nNote that you'll have to get an access token from the \"Settings\" tab of your HuggingFace for the above code to work.\nIn the script, the token is securely accessed via an environment variable.\n\n![access_token](https://github.com/gradio-app/gradio/blob/main/guides/assets/access_token.png?raw=true)\n\nNow we will create a background task to synch our local database to the dataset hub every 60 seconds.\nWe will use the [AdvancedPythonScheduler](https://apscheduler.readthedocs.io/en/3.x/) to handle the scheduling.\nHowever, this is not the only task scheduling library available. Feel free to use whatever you are comfortable with.\n\nThe function to back up our data will look like this:\n\n```python\nfrom apscheduler.schedulers.background import BackgroundScheduler\n\ndef backup_db():\n shutil.copyfile(DB_FILE, \"./data/reviews.db\")\n db = sqlite3.connect(DB_FILE)\n reviews = db.execute(\"SELECT * FROM reviews\").fetchall()\n pd.DataFrame(reviews).to_csv(\"./data/reviews.csv\", index=False)\n print(\"updating db\")\n repo.push_to_hub(blocking=False, commit_message=f\"Updating data at {datetime.datetime.now()}\")\n\n\nscheduler = BackgroundScheduler()\nscheduler.add_job(func=backup_db, trigger=\"interval\", seconds=60)\nscheduler.start()\n```\n\n\n## Step 4 (Bonus) - Deployment to HuggingFace Spaces\nYou can use the HuggingFace [Spaces](https://huggingface.co/spaces) platform to deploy this application for free \u2728\n\nIf you haven't used Spaces before, follow the previous guide [here](/using_hugging_face_integrations).\nYou will have to use the `HUB_TOKEN` environment variable as a secret in the Guides.\n\n## Conclusion\nCongratulations! You know how to run background tasks from your gradio app on a schedule \u23f2\ufe0f. \n\nCheckout the application running on Spaces [here](https://huggingface.co/spaces/freddyaboulton/gradio-google-forms).\nThe complete code is [here](https://huggingface.co/spaces/freddyaboulton/gradio-google-forms/blob/main/app.py)", "html": "

Running Background Tasks

\n\n

Introduction

\n\n

This guide explains how you can run background tasks from your gradio app.\nBackground tasks are operations that you'd like to perform outside the request-response\nlifecycle of your app either once or on a periodic schedule.\nExamples of background tasks include periodically synchronizing data to an external database or \nsending a report of model predictions via email.

\n\n

Overview

\n\n

We will be creating a simple \"Google-forms-style\" application to gather feedback from users of the gradio library.\nWe will use a local sqlite database to store our data, but we will periodically synchronize the state of the database\nwith a HuggingFace Dataset so that our user reviews are always backed up.\nThe synchronization will happen in a background task running every 60 seconds.

\n\n

At the end of the demo, you'll have a fully working application like this one:

\n\n

\n\n

Step 1 - Write your database logic \ud83d\udcbe

\n\n

Our application will store the name of the reviewer, their rating of gradio on a scale of 1 to 5, as well as\nany comments they want to share about the library. Let's write some code that creates a database table to\nstore this data. We'll also write some functions to insert a review into that table and fetch the latest 10 reviews.

\n\n

We're going to use the sqlite3 library to connect to our sqlite database but gradio will work with any library.

\n\n

The code will look like this:

\n\n
DB_FILE = \"./reviews.db\"\ndb = sqlite3.connect(DB_FILE)\n\n# Create table if it doesn't already exist\ntry:\n    db.execute(\"SELECT * FROM reviews\").fetchall()\n    db.close()\nexcept sqlite3.OperationalError:\n    db.execute(\n        '''\n        CREATE TABLE reviews (id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n                              created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,\n                              name TEXT, review INTEGER, comments TEXT)\n        ''')\n    db.commit()\n    db.close()\n\ndef get_latest_reviews(db: sqlite3.Connection):\n    reviews = db.execute(\"SELECT * FROM reviews ORDER BY id DESC limit 10\").fetchall()\n    total_reviews = db.execute(\"Select COUNT(id) from reviews\").fetchone()[0]\n    reviews = pd.DataFrame(reviews, columns=[\"id\", \"date_created\", \"name\", \"review\", \"comments\"])\n    return reviews, total_reviews\n\n\ndef add_review(name: str, review: int, comments: str):\n    db = sqlite3.connect(DB_FILE)\n    cursor = db.cursor()\n    cursor.execute(\"INSERT INTO reviews(name, review, comments) VALUES(?,?,?)\", [name, review, comments])\n    db.commit()\n    reviews, total_reviews = get_latest_reviews(db)\n    db.close()\n    return reviews, total_reviews\n
\n\n

Let's also write a function to load the latest reviews when the gradio application loads:

\n\n
def load_data():\n    db = sqlite3.connect(DB_FILE)\n    reviews, total_reviews = get_latest_reviews(db)\n    db.close()\n    return reviews, total_reviews\n
\n\n

Step 2 - Create a gradio app \u26a1

\n\n

Now that we have our database logic defined, we can use gradio to create a dynamic web page to ask our users for feedback!

\n\n
with gr.Blocks() as demo:\n    with gr.Row():\n        with gr.Column():\n            name = gr.Textbox(label=\"Name\", placeholder=\"What is your name?\")\n            review = gr.Radio(label=\"How satisfied are you with using gradio?\", choices=[1, 2, 3, 4, 5])\n            comments = gr.Textbox(label=\"Comments\", lines=10, placeholder=\"Do you have any feedback on gradio?\")\n            submit = gr.Button(value=\"Submit Feedback\")\n        with gr.Column():\n            data = gr.Dataframe(label=\"Most recently created 10 rows\")\n            count = gr.Number(label=\"Total number of reviews\")\n    submit.click(add_review, [name, review, comments], [data, count])\n    demo.load(load_data, None, [data, count])\n
\n\n

Step 3 - Synchronize with HuggingFace Datasets \ud83e\udd17

\n\n

We could call demo.launch() after step 2 and have a fully functioning application. However,\nour data would be stored locally on our machine. If the sqlite file were accidentally deleted, we'd lose all of our reviews!\nLet's back up our data to a dataset on the HuggingFace hub.

\n\n

Create a dataset here before proceeding.

\n\n

Now at the top of our script, we'll use the huggingface hub client library\nto connect to our dataset and pull the latest backup.

\n\n
TOKEN = os.environ.get('HUB_TOKEN')\nrepo = huggingface_hub.Repository(\n    local_dir=\"data\",\n    repo_type=\"dataset\",\n    clone_from=\"\",\n    use_auth_token=TOKEN\n)\nrepo.git_pull()\n\nshutil.copyfile(\"./data/reviews.db\", DB_FILE)\n
\n\n

Note that you'll have to get an access token from the \"Settings\" tab of your HuggingFace account for the above code to work.\nIn the script, the token is securely accessed via an environment variable.

\n\n

\"access_token\"

\n\n

Now we will create a background task to sync our local database to the dataset hub every 60 seconds.\nWe will use the Advanced Python Scheduler (APScheduler) to handle the scheduling.\nHowever, this is not the only task scheduling library available. Feel free to use whichever one you are comfortable with.

\n\n

The function to back up our data will look like this:

\n\n
from apscheduler.schedulers.background import BackgroundScheduler\n\ndef backup_db():\n    shutil.copyfile(DB_FILE, \"./data/reviews.db\")\n    db = sqlite3.connect(DB_FILE)\n    reviews = db.execute(\"SELECT * FROM reviews\").fetchall()\n    pd.DataFrame(reviews).to_csv(\"./data/reviews.csv\", index=False)\n    print(\"updating db\")\n    repo.push_to_hub(blocking=False, commit_message=f\"Updating data at {datetime.datetime.now()}\")\n\n\nscheduler = BackgroundScheduler()\nscheduler.add_job(func=backup_db, trigger=\"interval\", seconds=60)\nscheduler.start()\n
\n\n

Step 4 (Bonus) - Deployment to HuggingFace Spaces

\n\n

You can use the HuggingFace Spaces platform to deploy this application for free \u2728

\n\n

If you haven't used Spaces before, follow the previous guide here.\nYou will have to add the HUB_TOKEN environment variable as a secret in your Space's settings.

\n\n

Conclusion

\n\n

Congratulations! You know how to run background tasks from your gradio app on a schedule \u23f2\ufe0f.

\n\n

Check out the application running on Spaces here.\nThe complete code is here.

\n", "tags": ["TASKS", "SCHEDULED", "TABULAR", "DATA "], "spaces": ["https://huggingface.co/spaces/freddyaboulton/gradio-google-forms"], "url": "/guides/running-background-tasks/", "contributor": null}, {"name": "running-gradio-on-your-web-server-with-nginx", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 42, "pretty_name": "Running Gradio On Your Web Server With Nginx", "content": "# Running a Gradio App on your Web Server with Nginx\n\n\n\n## Introduction\n\nGradio is a Python library that allows you to quickly create customizable web apps for your machine learning models and data processing pipelines. Gradio apps can be deployed on [Hugging Face Spaces](https://hf.space) for free. \n\nIn some cases though, you might want to deploy a Gradio app on your own web server. You might already be using [Nginx](https://www.nginx.com/), a highly performant web server, to serve your website (say `https://www.example.com`), and you want to attach Gradio to a specific subpath on your website (e.g. `https://www.example.com/gradio-demo`). \n\nIn this Guide, we will guide you through the process of running a Gradio app behind Nginx on your own web server to achieve this.\n\n**Prerequisites**\n\n1. A Linux web server with [Nginx installed](https://www.nginx.com/blog/setting-up-nginx/) and [Gradio installed](/quickstart) \n \n2. A working Gradio app saved as a python file on your web server\n\n## Editing your Nginx configuration file\n\n1. Start by editing the Nginx configuration file on your web server. By default, this is located at: `/etc/nginx/nginx.conf`\n\nIn the `http` block, add the following line to include server block configurations from a separate file:\n\n```bash\ninclude /etc/nginx/sites-enabled/*;\n```\n\n2. Create a new file in the `/etc/nginx/sites-available` directory (create the directory if it does not already exist), using a filename that represents your app, for example: `sudo nano /etc/nginx/sites-available/my_gradio_app`\n\n3. Paste the following into your file editor:\n\n```bash\nserver {\n listen 80;\n server_name example.com www.example.com; # Change this to your domain name \n\n location /gradio-demo/ { # Change this if you'd like to server your Gradio app on a different path\n proxy_pass http://127.0.0.1:7860/; # Change this if your Gradio app will be running on a different port\n proxy_redirect off;\n proxy_http_version 1.1;\n proxy_set_header Upgrade $http_upgrade;\n proxy_set_header Connection \"upgrade\";\n proxy_set_header Host $host;\n }\n}\n```\n\n## Run your Gradio app on your web server\n\n1. Before you launch your Gradio app, you'll need to set the `root_path` to be the same as the subpath that you specified in your nginx configuration. This is necessary for Gradio to run on any subpath besides the root of the domain.\n\nHere's a simple example of a Gradio app with a custom `root_path`:\n\n```python\nimport gradio as gr\nimport time\n\ndef test(x):\ntime.sleep(4)\nreturn x\n\ngr.Interface(test, \"textbox\", \"textbox\").queue().launch(root_path=\"/gradio-demo\")\n```\n\n2. Start a `tmux` session by typing `tmux` and pressing enter (optional) \n\nIt's recommended that you run your Gradio app in a `tmux` session so that you can keep it running in the background easily\n\n3. Then, start your Gradio app. Simply type in `python` followed by the name of your Gradio python file. 
By default, your app will run on `localhost:7860`, but if it starts on a different port, you will need to update the nginx configuration file above.\n\n## Restart Nginx\n\n1. If you are in a tmux session, exit by typing CTRL+B (or CMD+B), followed by the \"D\" key.\n\n2. Finally, restart nginx by running `sudo systemctl restart nginx`. \n\nAnd that's it! If you visit `https://example.com/gradio-demo` on your browser, you should see your Gradio app running there\n\n", "html": "

Running a Gradio App on your Web Server with Nginx

\n\n

Introduction

\n\n

Gradio is a Python library that allows you to quickly create customizable web apps for your machine learning models and data processing pipelines. Gradio apps can be deployed on Hugging Face Spaces for free.

\n\n

In some cases though, you might want to deploy a Gradio app on your own web server. You might already be using Nginx, a highly performant web server, to serve your website (say https://www.example.com), and you want to attach Gradio to a specific subpath on your website (e.g. https://www.example.com/gradio-demo).

\n\n

In this Guide, we will guide you through the process of running a Gradio app behind Nginx on your own web server to achieve this.

\n\n

Prerequisites

\n\n
    \n
  1. A Linux web server with Nginx installed and Gradio installed

  2. \n
  3. A working Gradio app saved as a python file on your web server

  4. \n
\n\n

Editing your Nginx configuration file

\n\n
    \n
  1. Start by editing the Nginx configuration file on your web server. By default, this is located at: /etc/nginx/nginx.conf
  2. \n
\n\n

In the http block, add the following line to include server block configurations from a separate file:

\n\n
include /etc/nginx/sites-enabled/*;\n
\n\n
    \n
  1. Create a new file in the /etc/nginx/sites-available directory (create the directory if it does not already exist), using a filename that represents your app, for example: sudo nano /etc/nginx/sites-available/my_gradio_app

  2. \n
  3. Paste the following into your file editor:

  4. \n
\n\n
server {\n    listen 80;\n    server_name example.com www.example.com;  # Change this to your domain name \n\n    location /gradio-demo/ {  # Change this if you'd like to serve your Gradio app on a different path\n        proxy_pass http://127.0.0.1:7860/; # Change this if your Gradio app will be running on a different port\n        proxy_redirect off;\n        proxy_http_version 1.1;\n        proxy_set_header Upgrade $http_upgrade;\n        proxy_set_header Connection \"upgrade\";\n        proxy_set_header Host $host;\n    }\n}\n
\n\n

Run your Gradio app on your web server

\n\n
    \n
  1. Before you launch your Gradio app, you'll need to set the root_path to be the same as the subpath that you specified in your nginx configuration. This is necessary for Gradio to run on any subpath besides the root of the domain.
  2. \n
\n\n

Here's a simple example of a Gradio app with a custom root_path:

\n\n
import gradio as gr\nimport time\n\ndef test(x):\n    time.sleep(4)\n    return x\n\ngr.Interface(test, \"textbox\", \"textbox\").queue().launch(root_path=\"/gradio-demo\")\n
\n\n
    \n
  1. Start a tmux session by typing tmux and pressing enter (optional)
  2. \n
\n\n

It's recommended that you run your Gradio app in a tmux session so that you can keep it running in the background easily

\n\n
    \n
  1. Then, start your Gradio app. Simply type in python followed by the name of your Gradio python file. By default, your app will run on localhost:7860, but if it starts on a different port, you will need to update the nginx configuration file above.
  2. \n
\n\n

Restart Nginx

\n\n
    \n
  1. If you are in a tmux session, exit by typing CTRL+B (or CMD+B), followed by the \"D\" key.

  2. \n
  3. Finally, restart nginx by running sudo systemctl restart nginx.

  4. \n
\n\n

And that's it! If you visit https://example.com/gradio-demo in your browser, you should see your Gradio app running there.

\n", "tags": ["DEPLOYMENT", "WEB SERVER", "NGINX"], "spaces": [], "url": "/guides/running-gradio-on-your-web-server-with-nginx/", "contributor": null}, {"name": "setting-up-a-demo-for-maximum-performance", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 43, "pretty_name": "Setting Up A Demo For Maximum Performance", "content": "# Setting Up a Demo for Maximum Performance\n\n\n\n\nLet's say that your Gradio demo goes *viral* on social media -- you have lots of users trying it out simultaneously, and you want to provide your users with the best possible experience or, in other words, minimize the amount of time that each user has to wait in the queue to see their prediction.\n\nHow can you configure your Gradio demo to handle the most traffic? In this Guide, we dive into some of the parameters of Gradio's `.queue()` method as well as some other related configurations, and discuss how to set these parameters in a way that allows you to serve lots of users simultaneously with minimal latency.\n\nThis is an advanced guide, so make sure you know the basics of Gradio already, such as [how to create and launch a Gradio Interface](https://gradio.app/guides/quickstart/). Most of the information in this Guide is relevant whether you are hosting your demo on [Hugging Face Spaces](https://hf.space) or on your own server.\n\n## Enabling Gradio's Queueing System\n\nBy default, a Gradio demo does not use queueing and instead sends prediction requests via a POST request to the server where your Gradio server and Python code are running. However, regular POST requests have two big limitations:\n\n(1) They time out -- most browsers raise a timeout error\nif they do not get a response to a POST request after a short period of time (e.g. 1 min).\nThis can be a problem if your inference function takes longer than 1 minute to run or\nif many people are trying out your demo at the same time, resulting in increased latency.\n\n(2) They do not allow bi-directional communication between the Gradio demo and the Gradio server. This means, for example, that you cannot get a real-time ETA of how long your prediction will take to complete.\n\nTo address these limitations, any Gradio app can be converted to use **websockets** instead, simply by adding `.queue()` before launching an Interface or a Blocks. Here's an example:\n\n```py\napp = gr.Interface(lambda x:x, \"image\", \"image\")\napp.queue() # <-- Sets up a queue with default parameters\napp.launch()\n```\n\nIn the demo `app` above, predictions will now be sent over a websocket instead.\nUnlike POST requests, websockets do not timeout and they allow bidirectional traffic. On the Gradio server, a **queue** is set up, which adds each request that comes to a list. When a worker is free, the first available request is passed into the worker for inference. When the inference is complete, the queue sends the prediction back through the websocket to the particular Gradio user who called that prediction. \n\nNote: If you host your Gradio app on [Hugging Face Spaces](https://hf.space), the queue is already **enabled by default**. You can still call the `.queue()` method manually in order to configure the queue parameters described below.\n\n## Queuing Parameters\n\nThere are several parameters that can be used to configure the queue and help reduce latency. 
Let's go through them one-by-one.\n\n### The `concurrency_count` parameter\n\nThe first parameter we will explore is the `concurrency_count` parameter of `queue()`. This parameter is used to set the number of worker threads in the Gradio server that will be processing your requests in parallel. By default, this parameter is set to `1` but increasing this can **linearly multiply the capacity of your server to handle requests**.\n\nSo why not set this parameter much higher? Keep in mind that since requests are processed in parallel, each request will consume memory to store the data and weights for processing. This means that you might get out-of-memory errors if you increase the `concurrency_count` too high. You may also start to get diminishing returns if the `concurrency_count` is too high because of costs of switching between different worker threads.\n\n**Recommendation**: Increase the `concurrency_count` parameter as high as you can while you continue to see performance gains or until you hit memory limits on your machine. You can [read about Hugging Face Spaces machine specs here](https://huggingface.co/docs/hub/spaces-overview). \n\n*Note*: there is a second parameter which controls the *total* number of threads that Gradio can generate, whether or not queuing is enabled. This is the `max_threads` parameter in the `launch()` method. When you increase the `concurrency_count` parameter in `queue()`, this is automatically increased as well. However, in some cases, you may want to manually increase this, e.g. if queuing is not enabled. \n\n### The `max_size` parameter\n\nA more blunt way to reduce the wait times is simply to prevent too many people from joining the queue in the first place. You can set the maximum number of requests that the queue processes using the `max_size` parameter of `queue()`. If a request arrives when the queue is already of the maximum size, it will not be allowed to join the queue and instead, the user will receive an error saying that the queue is full and to try again. By default, `max_size=None`, meaning that there is no limit to the number of users that can join the queue.\n\nParadoxically, setting a `max_size` can often improve user experience because it prevents users from being dissuaded by very long queue wait times. Users who are more interested and invested in your demo will keep trying to join the queue, and will be able to get their results faster. \n\n**Recommendation**: For a better user experience, set a `max_size` that is reasonable given your expectations of how long users might be willing to wait for a prediction. \n\n### The `max_batch_size` parameter\n\nAnother way to increase the parallelism of your Gradio demo is to write your function so that it can accept **batches** of inputs. Most deep learning models can process batches of samples more efficiently than processing individual samples. \n\nIf you write your function to process a batch of samples, Gradio will automatically batch incoming requests together and pass them into your function as a batch of samples. You need to set `batch` to `True` (by default it is `False`) and set a `max_batch_size` (by default it is `4`) based on the maximum number of samples your function is able to handle. These two parameters can be passed into `gr.Interface()` or to an event in Blocks such as `.click()`. \n\nWhile setting a batch is conceptually similar to having workers process requests in parallel, it is often *faster* than setting the `concurrency_count` for deep learning models. 
The downside is that you might need to adapt your function a little bit to accept batches of samples instead of individual samples. \n\nHere's an example of a function that does *not* accept a batch of inputs -- it processes a single input at a time:\n\n```py\nimport time\n\ndef trim_words(word, length):\n return w[:int(length)]\n\n```\n\nHere's the same function rewritten to take in a batch of samples:\n\n```py\nimport time\n\ndef trim_words(words, lengths):\n trimmed_words = []\n for w, l in zip(words, lengths):\n trimmed_words.append(w[:int(l)]) \n return [trimmed_words]\n\n```\n\nThe second function can be used with `batch=True` and an appropriate `max_batch_size` parameter.\n\n**Recommendation**: If possible, write your function to accept batches of samples, and then set `batch` to `True` and the `max_batch_size` as high as possible based on your machine's memory limits. If you set `max_batch_size` as high as possible, you will most likely need to set `concurrency_count` back to `1` since you will no longer have the memory to have multiple workers running in parallel. \n\n### The `api_open` parameter\n\nWhen creating a Gradio demo, you may want to restrict all traffic to happen through the user interface as opposed to the [programmatic API](/guides/sharing-your-app/#api-page) that is automatically created for your Gradio demo. This is important because when people make requests through the programmatic API, they can potentially bypass users who are waiting in the queue and degrade the experience of these users. \n\n**Recommendation**: set the `api_open` parameter in `queue()` to `False` in your demo to prevent programmatic requests.\n\n\n\n### Upgrading your Hardware (GPUs, TPUs, etc.)\n\nIf you have done everything above, and your demo is still not fast enough, you can upgrade the hardware that your model is running on. Changing the model from running on CPUs to running on GPUs will usually provide a 10x-50x increase in inference time for deep learning models.\n\nIt is particularly straightforward to upgrade your Hardware on Hugging Face Spaces. Simply click on the \"Settings\" tab in your Space and choose the Space Hardware you'd like.\n\n![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/hub/spaces-gpu-settings.png)\n\nWhile you might need to adapt portions of your machine learning inference code to run on a GPU (here's a [handy guide](https://cnvrg.io/pytorch-cuda/) if you are using PyTorch), Gradio is completely agnostic to the choice of hardware and will work completely fine if you use it with CPUs, GPUs, TPUs, or any other hardware!\n\nNote: your GPU memory is different than your CPU memory, so if you upgrade your hardware,\nyou might need to adjust the value of the `concurrency_count` parameter described above.\n\n## Conclusion\n\nCongratulations! You know how to set up a Gradio demo for maximum performance. Good luck on your next viral demo! \n\n", "html": "

Setting Up a Demo for Maximum Performance

\n\n

Let's say that your Gradio demo goes viral on social media -- you have lots of users trying it out simultaneously, and you want to provide your users with the best possible experience or, in other words, minimize the amount of time that each user has to wait in the queue to see their prediction.

\n\n

How can you configure your Gradio demo to handle the most traffic? In this Guide, we dive into some of the parameters of Gradio's .queue() method as well as some other related configurations, and discuss how to set these parameters in a way that allows you to serve lots of users simultaneously with minimal latency.

\n\n

This is an advanced guide, so make sure you know the basics of Gradio already, such as how to create and launch a Gradio Interface. Most of the information in this Guide is relevant whether you are hosting your demo on Hugging Face Spaces or on your own server.

\n\n

Enabling Gradio's Queueing System

\n\n

By default, a Gradio demo does not use queueing and instead sends prediction requests via a POST request to the server where your Gradio server and Python code are running. However, regular POST requests have two big limitations:

\n\n

(1) They time out -- most browsers raise a timeout error\nif they do not get a response to a POST request after a short period of time (e.g. 1 min).\nThis can be a problem if your inference function takes longer than 1 minute to run or\nif many people are trying out your demo at the same time, resulting in increased latency.

\n\n

(2) They do not allow bi-directional communication between the Gradio demo and the Gradio server. This means, for example, that you cannot get a real-time ETA of how long your prediction will take to complete.

\n\n

To address these limitations, any Gradio app can be converted to use websockets instead, simply by adding .queue() before launching an Interface or a Blocks. Here's an example:

\n\n
app = gr.Interface(lambda x:x, \"image\", \"image\")\napp.queue()  # <-- Sets up a queue with default parameters\napp.launch()\n
\n\n

In the demo app above, predictions will now be sent over a websocket instead.\nUnlike POST requests, websockets do not time out and they allow bidirectional traffic. On the Gradio server, a queue is set up, which adds each incoming request to a list. When a worker is free, the first request in the list is passed to the worker for inference. When the inference is complete, the queue sends the prediction back through the websocket to the particular Gradio user who made that request.

\n\n

Note: If you host your Gradio app on Hugging Face Spaces, the queue is already enabled by default. You can still call the .queue() method manually in order to configure the queue parameters described below.

\n\n

Queuing Parameters

\n\n

There are several parameters that can be used to configure the queue and help reduce latency. Let's go through them one-by-one.

\n\n

The concurrency_count parameter

\n\n

The first parameter we will explore is the concurrency_count parameter of queue(). This parameter is used to set the number of worker threads in the Gradio server that will be processing your requests in parallel. By default, this parameter is set to 1 but increasing this can linearly multiply the capacity of your server to handle requests.

\n\n

So why not set this parameter much higher? Keep in mind that since requests are processed in parallel, each request will consume memory to store the data and weights for processing. This means that you might get out-of-memory errors if you increase the concurrency_count too high. You may also start to get diminishing returns if the concurrency_count is too high because of costs of switching between different worker threads.

\n\n

Recommendation: Increase the concurrency_count parameter as high as you can while you continue to see performance gains or until you hit memory limits on your machine. You can read about Hugging Face Spaces machine specs here.

\n\n

Note: there is a second parameter which controls the total number of threads that Gradio can generate, whether or not queuing is enabled. This is the max_threads parameter in the launch() method. When you increase the concurrency_count parameter in queue(), this is automatically increased as well. However, in some cases, you may want to manually increase this, e.g. if queuing is not enabled.

\n\n
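
As a rough sketch (the value 3 below is illustrative, not a recommendation), configuring the worker count looks like this:

\n\n
app = gr.Interface(lambda x:x, \"image\", \"image\")\napp.queue(concurrency_count=3)  # 3 worker threads process requests in parallel\napp.launch()\n
\n\n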

The max_size parameter

\n\n

A more blunt way to reduce the wait times is simply to prevent too many people from joining the queue in the first place. You can set the maximum number of requests that the queue processes using the max_size parameter of queue(). If a request arrives when the queue is already of the maximum size, it will not be allowed to join the queue and instead, the user will receive an error saying that the queue is full and to try again. By default, max_size=None, meaning that there is no limit to the number of users that can join the queue.

\n\n

Paradoxically, setting a max_size can often improve user experience because it prevents users from being dissuaded by very long queue wait times. Users who are more interested and invested in your demo will keep trying to join the queue, and will be able to get their results faster.

\n\n

Recommendation: For a better user experience, set a max_size that is reasonable given your expectations of how long users might be willing to wait for a prediction.

\n\n
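
For instance, a one-line sketch that caps the queue at 20 waiting requests (an illustrative number, not a recommendation):

\n\n
app.queue(max_size=20)  # once 20 requests are waiting, new users are asked to try again later\napp.launch()\n
\n\n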

The max_batch_size parameter

\n\n

Another way to increase the parallelism of your Gradio demo is to write your function so that it can accept batches of inputs. Most deep learning models can process batches of samples more efficiently than processing individual samples.

\n\n

If you write your function to process a batch of samples, Gradio will automatically batch incoming requests together and pass them into your function as a batch of samples. You need to set batch to True (by default it is False) and set a max_batch_size (by default it is 4) based on the maximum number of samples your function is able to handle. These two parameters can be passed into gr.Interface() or to an event in Blocks such as .click().

\n\n

While setting a batch is conceptually similar to having workers process requests in parallel, it is often faster than setting the concurrency_count for deep learning models. The downside is that you might need to adapt your function a little bit to accept batches of samples instead of individual samples.

\n\n

Here's an example of a function that does not accept a batch of inputs -- it processes a single input at a time:

\n\n
import time\n\ndef trim_words(word, length):\n    return word[:int(length)]\n\n
\n\n

Here's the same function rewritten to take in a batch of samples:

\n\n
import time\n\ndef trim_words(words, lengths):\n    trimmed_words = []\n    for w, l in zip(words, lengths):\n        trimmed_words.append(w[:int(l)])        \n    return [trimmed_words]\n\n
\n\n

The second function can be used with batch=True and an appropriate max_batch_size parameter.

\n\n
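
For example, here is a sketch of how the batched function above could be wired into an Interface (the input/output component choices and max_batch_size=16 are illustrative assumptions):

\n\n
demo = gr.Interface(\n    fn=trim_words, \n    inputs=[\"textbox\", \"number\"], \n    outputs=[\"textbox\"], \n    batch=True, \n    max_batch_size=16)\ndemo.queue()\ndemo.launch()\n
\n\n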

Recommendation: If possible, write your function to accept batches of samples, and then set batch to True and the max_batch_size as high as possible based on your machine's memory limits. If you set max_batch_size as high as possible, you will most likely need to set concurrency_count back to 1 since you will no longer have the memory to have multiple workers running in parallel.

\n\n

The api_open parameter

\n\n

When creating a Gradio demo, you may want to restrict all traffic to happen through the user interface as opposed to the programmatic API that is automatically created for your Gradio demo. This is important because when people make requests through the programmatic API, they can potentially bypass users who are waiting in the queue and degrade the experience of these users.

\n\n

Recommendation: set the api_open parameter in queue() to False in your demo to prevent programmatic requests.

\n\n
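
In code, that is a single keyword argument (a minimal sketch):

\n\n
app.queue(api_open=False)  # requests must go through the UI; the programmatic API is closed\napp.launch()\n
\n\n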

Upgrading your Hardware (GPUs, TPUs, etc.)

\n\n

If you have done everything above, and your demo is still not fast enough, you can upgrade the hardware that your model is running on. Changing the model from running on CPUs to running on GPUs will usually provide a 10x-50x speedup in inference for deep learning models.

\n\n

It is particularly straightforward to upgrade your Hardware on Hugging Face Spaces. Simply click on the \"Settings\" tab in your Space and choose the Space Hardware you'd like.

\n\n

\"\"

\n\n

While you might need to adapt portions of your machine learning inference code to run on a GPU (here's a handy guide if you are using PyTorch), Gradio is completely agnostic to the choice of hardware and will work completely fine if you use it with CPUs, GPUs, TPUs, or any other hardware!

\n\n

Note: your GPU memory is different than your CPU memory, so if you upgrade your hardware,\nyou might need to adjust the value of the concurrency_count parameter described above.

\n\n

Conclusion

\n\n

Congratulations! You know how to set up a Gradio demo for maximum performance. Good luck on your next viral demo!

\n", "tags": ["QUEUE", "PERFORMANCE"], "spaces": [], "url": "/guides/setting-up-a-demo-for-maximum-performance/", "contributor": null}, {"name": "theming-guide", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 44, "pretty_name": "Theming Guide", "content": "# Theming\n\n\n## Introduction\n\nGradio features a built-in theming engine that lets you customize the look and feel of your app. You can choose from a variety of themes, or create your own. To do so, pass the `theme=` kwarg to the `Blocks` or `Interface` constructor. For example:\n\n```python\nwith gr.Blocks(theme=gr.themes.Soft()) as demo:\n ...\n```\n\n
\n\n
\n\nGradio comes with a set of prebuilt themes which you can load from `gr.themes.*`. These are:\n\n* `gr.themes.Base()`\n* `gr.themes.Default()`\n* `gr.themes.Glass()`\n* `gr.themes.Monochrome()`\n* `gr.themes.Soft()`\n\nEach of these themes set values for hundreds of CSS variables. You can use prebuilt themes as a starting point for your own custom themes, or you can create your own themes from scratch. Let's take a look at each approach.\n\n## Using the Theme Builder\n\nThe easiest way to build a theme is using the Theme Builder. To launch the Theme Builder locally, run the following code:\n\n```python\nimport gradio as gr\n\ngr.themes.builder()\n```\n\n\n\nYou can use the Theme Builder running on Spaces above, though it runs much faster when you launch it locally via `gr.themes.builder()`. \n\nAs you edit the values in the Theme Builder, the app will preview updates in real time. You can download the code to generate the theme you've created so you can use it in any Gradio app.\n\nIn the rest of the guide, we will cover building themes programmatically.\n\n## Extending Themes via the Constructor\n\nAlthough each theme has hundreds of CSS variables, the values for most these variables are drawn from 8 core variables which can be set through the constructor of each prebuilt theme. Modifying these 8 arguments allows you to quickly change the look and feel of your app.\n\n### Core Colors\n\nThe first 3 constructor arguments set the colors of the theme and are `gradio.themes.Color` objects. Internally, these Color objects hold brightness values for the palette of a single hue, ranging from 50, 100, 200..., 800, 900, 950. Other CSS variables are derived from these 3 colors.\n\nThe 3 color constructor arguments are:\n\n- `primary_hue`: This is the color draws attention in your theme. In the default theme, this is set to `gradio.themes.colors.orange`.\n- `secondary_hue`: This is the color that is used for secondary elements in your theme. In the default theme, this is set to `gradio.themes.colors.blue`.\n- `neutral_hue`: This is the color that is used for text and other neutral elements in your theme. In the default theme, this is set to `gradio.themes.colors.gray`.\n\nYou could modify these values using their string shortcuts, such as\n\n```python\nwith gr.Blocks(theme=gr.themes.Default(primary_hue=\"red\", secondary_hue=\"pink\")) as demo:\n ...\n```\n\nor you could use the `Color` objects directly, like this:\n\n```python\nwith gr.Blocks(theme=gr.themes.Default(primary_hue=gr.themes.colors.red, secondary_hue=gr.themes.colors.pink)) as demo:\n ...\n```\n
\n\n
\n\nPredefined colors are:\n\n* `slate`\n* `gray`\n* `zinc`\n* `neutral`\n* `stone`\n* `red`\n* `orange`\n* `amber`\n* `yellow`\n* `lime`\n* `green`\n* `emerald`\n* `teal`\n* `cyan`\n* `sky`\n* `blue`\n* `indigo`\n* `violet`\n* `purple`\n* `fuchsia`\n* `pink`\n* `rose`\n\nYou could also create your own custom `Color` objects and pass them in.\n\n### Core Sizing\n\nThe next 3 constructor arguments set the sizing of the theme and are `gradio.themes.Size` objects. Internally, these Size objects hold pixel size values that range from `xxs` to `xxl`. Other CSS variables are derived from these 3 sizes.\n\n- `spacing_size`: This sets the padding within and spacing between elements. In the default theme, this is set to `gradio.themes.sizes.spacing_md`.\n- `radius_size`: This sets the roundedness of corners of elements. In the default theme, this is set to `gradio.themes.sizes.radius_md`.\n- `text_size`: This sets the font size of text. In the default theme, this is set to `gradio.themes.sizes.text_md`.\n\nYou could modify these values using their string shortcuts, such as\n\n```python\nwith gr.Blocks(theme=gr.themes.Default(spacing_size=\"sm\", radius_size=\"none\")) as demo:\n ...\n```\n\nor you could use the `Size` objects directly, like this:\n\n```python\nwith gr.Blocks(theme=gr.themes.Default(spacing_size=gr.themes.sizes.spacing_sm, radius_size=gr.themes.sizes.radius_none)) as demo:\n ...\n```\n
\n\n
\n\nThe predefined size objects are:\n\n* `radius_none`\n* `radius_sm`\n* `radius_md`\n* `radius_lg`\n* `spacing_sm`\n* `spacing_md`\n* `spacing_lg`\n* `text_sm`\n* `text_md`\n* `text_lg`\n\nYou could also create your own custom `Size` objects and pass them in.\n\n### Core Fonts\n\nThe final 2 constructor arguments set the fonts of the theme. You can pass a list of fonts to each of these arguments to specify fallbacks. If you provide a string, it will be loaded as a system font. If you provide a `gradio.themes.GoogleFont`, the font will be loaded from Google Fonts.\n\n- `font`: This sets the primary font of the theme. In the default theme, this is set to `gradio.themes.GoogleFont(\"Source Sans Pro\")`.\n- `font_mono`: This sets the monospace font of the theme. In the default theme, this is set to `gradio.themes.GoogleFont(\"IBM Plex Mono\")`.\n\nYou could modify these values such as the following:\n\n```python\nwith gr.Blocks(theme=gr.themes.Default(font=[gr.themes.GoogleFont(\"Inconsolata\"), \"Arial\", \"sans-serif\"])) as demo:\n ...\n```\n\n
\n\n
\n\n\n## Extending Themes via `.set()`\n\nYou can also modify the values of CSS variables after the theme has been loaded. To do so, use the `.set()` method of the theme object to get access to the CSS variables. For example:\n\n```python\ntheme = gr.themes.Default(primary_hue=\"blue\").set(\n loader_color=\"#FF0000\",\n slider_color=\"#FF0000\",\n)\n\nwith gr.Blocks(theme=theme) as demo:\n ...\n```\n\nIn the example above, we've set the `loader_color` and `slider_color` variables to `#FF0000`, despite the overall `primary_color` using the blue color palette. You can set any CSS variable that is defined in the theme in this manner. \n\nYour IDE type hinting should help you navigate these variables. Since there are so many CSS variables, let's take a look at how these variables are named and organized.\n\n### CSS Variable Naming Conventions\n\nCSS variable names can get quite long, like `button_primary_background_fill_hover_dark`! However they follow a common naming convention that makes it easy to understand what they do and to find the variable you're looking for. Separated by underscores, the variable name is made up of:\n\n1. The target element, such as `button`, `slider`, or `block`.\n2. The target element type or sub-element, such as `button_primary`, or `block_label`.\n3. The property, such as `button_primary_background_fill`, or `block_label_border_width`.\n4. Any relevant state, such as `button_primary_background_fill_hover`.\n5. If the value is different in dark mode, the suffix `_dark`. For example, `input_border_color_focus_dark`.\n\nOf course, many CSS variable names are shorter than this, such as `table_border_color`, or `input_shadow`. \n\n### CSS Variable Organization\n\nThough there are hundreds of CSS variables, they do not all have to have individual values. They draw their values by referencing a set of core variables and referencing each other. This allows us to only have to modify a few variables to change the look and feel of the entire theme, while also getting finer control of individual elements that we may want to modify.\n\n#### Referencing Core Variables\n\nTo reference one of the core constructor variables, precede the variable name with an asterisk. To reference a core color, use the `*primary_`, `*secondary_`, or `*neutral_` prefix, followed by the brightness value. For example:\n\n```python\ntheme = gr.themes.Default(primary_hue=\"blue\").set(\n button_primary_background_fill=\"*primary_200\",\n button_primary_background_fill_hover=\"*primary_300\",\n)\n```\n\nIn the example above, we've set the `button_primary_background_fill` and `button_primary_background_fill_hover` variables to `*primary_200` and `*primary_300`. These variables will be set to the 200 and 300 brightness values of the blue primary color palette, respectively.\n\nSimilarly, to reference a core size, use the `*spacing_`, `*radius_`, or `*text_` prefix, followed by the size value. For example:\n\n```python\ntheme = gr.themes.Default(radius_size=\"md\").set(\n button_primary_border_radius=\"*radius_xl\",\n)\n```\n\nIn the example above, we've set the `button_primary_border_radius` variable to `*radius_xl`. This variable will be set to the `xl` setting of the medium radius size range.\n\n#### Referencing Other Variables\n\nVariables can also reference each other. 
For example, look at the example below:\n\n```python\ntheme = gr.themes.Default().set(\n button_primary_background_fill=\"#FF0000\",\n button_primary_background_fill_hover=\"#FF0000\",\n button_primary_border=\"#FF0000\",\n)\n```\n\nHaving to set these values to a common color is a bit tedious. Instead, we can reference the `button_primary_background_fill` variable in the `button_primary_background_fill_hover` and `button_primary_border` variables, using a `*` prefix. \n\n```python\ntheme = gr.themes.Default().set(\n button_primary_background_fill=\"#FF0000\",\n button_primary_background_fill_hover=\"*button_primary_background_fill\",\n button_primary_border=\"*button_primary_background_fill\",\n)\n```\n\nNow, if we change the `button_primary_background_fill` variable, the `button_primary_background_fill_hover` and `button_primary_border` variables will automatically update as well.\n\nThis is particularly useful if you intend to share your theme - it makes it easy to modify the theme without having to change every variable.\n\nNote that dark mode variables automatically reference each other. For example:\n\n```python\ntheme = gr.themes.Default().set(\n button_primary_background_fill=\"#FF0000\",\n button_primary_background_fill_dark=\"#AAAAAA\",\n button_primary_border=\"*button_primary_background_fill\",\n button_primary_border_dark=\"*button_primary_background_fill_dark\",\n)\n```\n\n`button_primary_border_dark` will draw its value from `button_primary_background_fill_dark`, because dark mode always draw from the dark version of the variable.\n\n## Creating a Full Theme\n\nLet's say you want to create a theme from scratch! We'll go through it step by step - you can also see the source of prebuilt themes in the gradio source repo for reference - [here's the source](https://github.com/gradio-app/gradio/blob/main/gradio/themes/monochrome.py) for the Monochrome theme.\n\nOur new theme class will inherit from `gradio.themes.Base`, a theme that sets a lot of convenient defaults. Let's make a simple demo that creates a dummy theme called Seafoam, and make a simple app that uses it.\n\n```python\nimport gradio as gr\nfrom gradio.themes.base import Base\nimport time\n\nclass Seafoam(Base):\n pass\n\nseafoam = Seafoam()\n\nwith gr.Blocks(theme=seafoam) as demo:\n textbox = gr.Textbox(label=\"Name\")\n slider = gr.Slider(label=\"Count\", minimum=0, maximum=100, step=1)\n with gr.Row():\n button = gr.Button(\"Submit\", variant=\"primary\")\n clear = gr.Button(\"Clear\")\n output = gr.Textbox(label=\"Output\")\n\n def repeat(name, count):\n time.sleep(3)\n return name * count\n \n button.click(repeat, [textbox, slider], output)\n\ndemo.launch()\n```\n\n
\n\n
\n\n\nThe Base theme is very barebones, and uses `gr.themes.Blue` as it primary color - you'll note the primary button and the loading animation are both blue as a result. Let's change the defaults core arguments of our app. We'll overwrite the constructor and pass new defaults for the core constructor arguments.\n\nWe'll use `gr.themes.Emerald` as our primary color, and set secondary and neutral hues to `gr.themes.Blue`. We'll make our text larger using `text_lg`. We'll use `Quicksand` as our default font, loaded from Google Fonts. \n\n```python\nfrom __future__ import annotations\nfrom typing import Iterable\nimport gradio as gr\nfrom gradio.themes.base import Base\nfrom gradio.themes.utils import colors, fonts, sizes\nimport time\n\n\nclass Seafoam(Base):\n def __init__(\n self,\n *,\n primary_hue: colors.Color | str = colors.emerald,\n secondary_hue: colors.Color | str = colors.blue,\n neutral_hue: colors.Color | str = colors.gray,\n spacing_size: sizes.Size | str = sizes.spacing_md,\n radius_size: sizes.Size | str = sizes.radius_md,\n text_size: sizes.Size | str = sizes.text_lg,\n font: fonts.Font\n | str\n | Iterable[fonts.Font | str] = (\n fonts.GoogleFont(\"Quicksand\"),\n \"ui-sans-serif\",\n \"sans-serif\",\n ),\n font_mono: fonts.Font\n | str\n | Iterable[fonts.Font | str] = (\n fonts.GoogleFont(\"IBM Plex Mono\"),\n \"ui-monospace\",\n \"monospace\",\n ),\n ):\n super().__init__(\n primary_hue=primary_hue,\n secondary_hue=secondary_hue,\n neutral_hue=neutral_hue,\n spacing_size=spacing_size,\n radius_size=radius_size,\n text_size=text_size,\n font=font,\n font_mono=font_mono,\n )\n\n\nseafoam = Seafoam()\n\nwith gr.Blocks(theme=seafoam) as demo:\n textbox = gr.Textbox(label=\"Name\")\n slider = gr.Slider(label=\"Count\", minimum=0, maximum=100, step=1)\n with gr.Row():\n button = gr.Button(\"Submit\", variant=\"primary\")\n clear = gr.Button(\"Clear\")\n output = gr.Textbox(label=\"Output\")\n\n def repeat(name, count):\n time.sleep(3)\n return name * count\n\n button.click(repeat, [textbox, slider], output)\n\ndemo.launch()\n\n```\n\n
\n\n
\n\nSee how the primary button and the loading animation are now green? These CSS variables are tied to the `primary_hue` variable. \n\nLet's modify the theme a bit more directly. We'll call the `set()` method to overwrite CSS variable values explicitly. We can use any CSS logic, and reference our core constructor arguments using the `*` prefix.\n\n```python\nfrom __future__ import annotations\nfrom typing import Iterable\nimport gradio as gr\nfrom gradio.themes.base import Base\nfrom gradio.themes.utils import colors, fonts, sizes\nimport time\n\n\nclass Seafoam(Base):\n def __init__(\n self,\n *,\n primary_hue: colors.Color | str = colors.emerald,\n secondary_hue: colors.Color | str = colors.blue,\n neutral_hue: colors.Color | str = colors.blue,\n spacing_size: sizes.Size | str = sizes.spacing_md,\n radius_size: sizes.Size | str = sizes.radius_md,\n text_size: sizes.Size | str = sizes.text_lg,\n font: fonts.Font\n | str\n | Iterable[fonts.Font | str] = (\n fonts.GoogleFont(\"Quicksand\"),\n \"ui-sans-serif\",\n \"sans-serif\",\n ),\n font_mono: fonts.Font\n | str\n | Iterable[fonts.Font | str] = (\n fonts.GoogleFont(\"IBM Plex Mono\"),\n \"ui-monospace\",\n \"monospace\",\n ),\n ):\n super().__init__(\n primary_hue=primary_hue,\n secondary_hue=secondary_hue,\n neutral_hue=neutral_hue,\n spacing_size=spacing_size,\n radius_size=radius_size,\n text_size=text_size,\n font=font,\n font_mono=font_mono,\n )\n super().set(\n body_background_fill=\"repeating-linear-gradient(45deg, *primary_200, *primary_200 10px, *primary_50 10px, *primary_50 20px)\",\n body_background_fill_dark=\"repeating-linear-gradient(45deg, *primary_800, *primary_800 10px, *primary_900 10px, *primary_900 20px)\",\n button_primary_background_fill=\"linear-gradient(90deg, *primary_300, *secondary_400)\",\n button_primary_background_fill_hover=\"linear-gradient(90deg, *primary_200, *secondary_300)\",\n button_primary_text_color=\"white\",\n button_primary_background_fill_dark=\"linear-gradient(90deg, *primary_600, *secondary_800)\",\n slider_color=\"*secondary_300\",\n slider_color_dark=\"*secondary_600\",\n block_title_text_weight=\"600\",\n block_border_width=\"3px\",\n block_shadow=\"*shadow_drop_lg\",\n button_shadow=\"*shadow_drop_lg\",\n button_large_padding=\"32px\",\n )\n\n\nseafoam = Seafoam()\n\nwith gr.Blocks(theme=seafoam) as demo:\n textbox = gr.Textbox(label=\"Name\")\n slider = gr.Slider(label=\"Count\", minimum=0, maximum=100, step=1)\n with gr.Row():\n button = gr.Button(\"Submit\", variant=\"primary\")\n clear = gr.Button(\"Clear\")\n output = gr.Textbox(label=\"Output\")\n\n def repeat(name, count):\n time.sleep(3)\n return name * count\n\n button.click(repeat, [textbox, slider], output)\n\ndemo.launch()\n\n```\n
\n\n
\n\n\nLook how fun our theme looks now! With just a few variable changes, our theme looks completely different.\n\nYou may find it helpful to explore the [source code of the other prebuilt themes](https://github.com/gradio-app/gradio/blob/main/gradio/themes) to see how they modified the base theme. You can also find your browser's Inspector useful to select elements from the UI and see what CSS variables are being used in the styles panel. \n\n## Sharing Themes\n\nOnce you have created a theme, you can upload it to the HuggingFace Hub to let others view it, use it, and build off of it!\n\n### Uploading a Theme\nThere are two ways to upload a theme, via the theme class instance or the command line. We will cover both of them with the previously created `seafoam` theme.\n\n* Via the class instance\n\nEach theme instance has a method called `push_to_hub` we can use to upload a theme to the HuggingFace hub.\n\n```python\nseafoam.push_to_hub(repo_name=\"seafoam\",\n                    version=\"0.0.1\",\n                    hf_token=\"\")\n```\n\n* Via the command line\n\nFirst save the theme to disk\n```python\nseafoam.dump(filename=\"seafoam.json\")\n```\n\nThen use the `upload_theme` command:\n\n```bash\nupload_theme \\\n\"seafoam.json\" \\\n\"seafoam\" \\\n--version \"0.0.1\" \\\n--hf_token \"\"\n```\n\nIn order to upload a theme, you must have a HuggingFace account and pass your [Access Token](https://huggingface.co/docs/huggingface_hub/quick-start#login)\nas the `hf_token` argument. However, if you log in via the [HuggingFace command line](https://huggingface.co/docs/huggingface_hub/quick-start#login) (which comes installed with `gradio`),\nyou can omit the `hf_token` argument.\n\nThe `version` argument lets you specify a valid [semantic version](https://www.geeksforgeeks.org/introduction-semantic-versioning/) string for your theme.\nThat way your users are able to specify which version of your theme they want to use in their apps. This also lets you publish updates to your theme without worrying\nabout changing how previously created apps look. The `version` argument is optional. If omitted, the next patch version is automatically applied.\n\n### Theme Previews\n\nBy calling `push_to_hub` or `upload_theme`, the theme assets will be stored in a [HuggingFace space](https://huggingface.co/docs/hub/spaces-overview).\n\nThe theme preview for our seafoam theme is here: [seafoam preview](https://huggingface.co/spaces/gradio/seafoam).\n\n
\n\n
\n\n### Discovering Themes\n\nThe [Theme Gallery](https://huggingface.co/spaces/gradio/theme-gallery) shows all the public gradio themes. After publishing your theme,\nit will automatically show up in the theme gallery after a couple of minutes. \n\nYou can sort the themes by the number of likes on the space or by how recently they were created, and you can also toggle each theme between light and dark mode.\n\n
\n\n
\n\n### Downloading\nTo use a theme from the hub, use the `from_hub` method on the `ThemeClass` and pass it to your app:\n\n```python\nmy_theme = gr.Theme.from_hub(\"gradio/seafoam\")\n\nwith gr.Blocks(theme=my_theme) as demo:\n ....\n```\n\nYou can also pass the theme string directly to `Blocks` or `Interface` (`gr.Blocks(theme=\"gradio/seafoam\")`)\n\nYou can pin your app to an upstream theme version by using semantic versioning expressions.\n\nFor example, the following would ensure the theme we load from the `seafoam` repo was between versions `0.0.1` and `0.1.0`:\n\n```python\nwith gr.Blocks(theme=\"gradio/seafoam@>=0.0.1,<0.1.0\") as demo:\n ....\n```\n\nEnjoy creating your own themes! If you make one you're proud of, please share it with the world by uploading it to the hub! \nIf you tag us on [Twitter](https://twitter.com/gradio) we can give your theme a shout out! \n\n\n", "html": "

Theming

\n\n

Introduction

\n\n

Gradio features a built-in theming engine that lets you customize the look and feel of your app. You can choose from a variety of themes, or create your own. To do so, pass the theme= kwarg to the Blocks or Interface constructor. For example:

\n\n
with gr.Blocks(theme=gr.themes.Soft()) as demo:\n    ...\n
\n\n
\n\n
\n\n

Gradio comes with a set of prebuilt themes which you can load from gr.themes.*. These are:

\n\n
    \n
  • gr.themes.Base()
  • \n
  • gr.themes.Default()
  • \n
  • gr.themes.Glass()
  • \n
  • gr.themes.Monochrome()
  • \n
  • gr.themes.Soft()
  • \n
\n\n

Each of these themes sets values for hundreds of CSS variables. You can use prebuilt themes as a starting point for your own custom themes, or you can create your own themes from scratch. Let's take a look at each approach.

\n\n

Using the Theme Builder

\n\n

The easiest way to build a theme is using the Theme Builder. To launch the Theme Builder locally, run the following code:

\n\n
import gradio as gr\n\ngr.themes.builder()\n
\n\n\n\nYou can use the Theme Builder running on Spaces above, though it runs much faster when you launch it locally via `gr.themes.builder()`. \n\nAs you edit the values in the Theme Builder, the app will preview updates in real time. You can download the code to generate the theme you've created so you can use it in any Gradio app.\n\nIn the rest of the guide, we will cover building themes programmatically.\n\n## Extending Themes via the Constructor\n\nAlthough each theme has hundreds of CSS variables, the values for most of these variables are drawn from 8 core variables which can be set through the constructor of each prebuilt theme. Modifying these 8 arguments allows you to quickly change the look and feel of your app.\n\n### Core Colors\n\nThe first 3 constructor arguments set the colors of the theme and are `gradio.themes.Color` objects. Internally, these Color objects hold brightness values for the palette of a single hue, ranging from 50, 100, 200..., 800, 900, 950. Other CSS variables are derived from these 3 colors.\n\nThe 3 color constructor arguments are:\n\n- `primary_hue`: This is the color that draws attention in your theme. In the default theme, this is set to `gradio.themes.colors.orange`.\n- `secondary_hue`: This is the color that is used for secondary elements in your theme. In the default theme, this is set to `gradio.themes.colors.blue`.\n- `neutral_hue`: This is the color that is used for text and other neutral elements in your theme. In the default theme, this is set to `gradio.themes.colors.gray`.\n\nYou could modify these values using their string shortcuts, such as\n\n
with gr.Blocks(theme=gr.themes.Default(primary_hue=\"red\", secondary_hue=\"pink\")) as demo:\n    ...\n
\n\nor you could use the `Color` objects directly, like this:\n\n
with gr.Blocks(theme=gr.themes.Default(primary_hue=gr.themes.colors.red, secondary_hue=gr.themes.colors.pink)) as demo:\n    ...\n
\n
\n\n
\n\n

Predefined colors are:

\n\n
    \n
  • slate
  • \n
  • gray
  • \n
  • zinc
  • \n
  • neutral
  • \n
  • stone
  • \n
  • red
  • \n
  • orange
  • \n
  • amber
  • \n
  • yellow
  • \n
  • lime
  • \n
  • green
  • \n
  • emerald
  • \n
  • teal
  • \n
  • cyan
  • \n
  • sky
  • \n
  • blue
  • \n
  • indigo
  • \n
  • violet
  • \n
  • purple
  • \n
  • fuchsia
  • \n
  • pink
  • \n
  • rose
  • \n
\n\n

You could also create your own custom Color objects and pass them in.

\n\n

Core Sizing

\n\n

The next 3 constructor arguments set the sizing of the theme and are gradio.themes.Size objects. Internally, these Size objects hold pixel size values that range from xxs to xxl. Other CSS variables are derived from these 3 sizes.

\n\n
    \n
  • spacing_size: This sets the padding within and spacing between elements. In the default theme, this is set to gradio.themes.sizes.spacing_md.
  • \n
  • radius_size: This sets the roundedness of corners of elements. In the default theme, this is set to gradio.themes.sizes.radius_md.
  • \n
  • text_size: This sets the font size of text. In the default theme, this is set to gradio.themes.sizes.text_md.
  • \n
\n\n

You could modify these values using their string shortcuts, such as

\n\n
with gr.Blocks(theme=gr.themes.Default(spacing_size=\"sm\", radius_size=\"none\")) as demo:\n    ...\n
\n\nor you could use the `Size` objects directly, like this:\n\n
with gr.Blocks(theme=gr.themes.Default(spacing_size=gr.themes.sizes.spacing_sm, radius_size=gr.themes.sizes.radius_none)) as demo:\n    ...\n
\n
\n\n
\n\n

The predefined size objects are:

\n\n
    \n
  • radius_none
  • \n
  • radius_sm
  • \n
  • radius_md
  • \n
  • radius_lg
  • \n
  • spacing_sm
  • \n
  • spacing_md
  • \n
  • spacing_lg
  • \n
  • text_sm
  • \n
  • text_md
  • \n
  • text_lg
  • \n
\n\n

You could also create your own custom Size objects and pass them in.

\n\n

Core Fonts

\n\n

The final 2 constructor arguments set the fonts of the theme. You can pass a list of fonts to each of these arguments to specify fallbacks. If you provide a string, it will be loaded as a system font. If you provide a gradio.themes.GoogleFont, the font will be loaded from Google Fonts.

\n\n
    \n
  • font: This sets the primary font of the theme. In the default theme, this is set to gradio.themes.GoogleFont(\"Source Sans Pro\").
  • \n
  • font_mono: This sets the monospace font of the theme. In the default theme, this is set to gradio.themes.GoogleFont(\"IBM Plex Mono\").
  • \n
\n\n

You could modify these values such as the following:

\n\n
with gr.Blocks(theme=gr.themes.Default(font=[gr.themes.GoogleFont(\"Inconsolata\"), \"Arial\", \"sans-serif\"])) as demo:\n    ...\n
\n\n
\n\n
\n\n

Extending Themes via .set()

\n\n

You can also modify the values of CSS variables after the theme has been loaded. To do so, use the .set() method of the theme object to get access to the CSS variables. For example:

\n\n
theme = gr.themes.Default(primary_hue=\"blue\").set(\n    loader_color=\"#FF0000\",\n    slider_color=\"#FF0000\",\n)\n\nwith gr.Blocks(theme=theme) as demo:\n    ...\n
\n\nIn the example above, we've set the `loader_color` and `slider_color` variables to `#FF0000`, despite the overall `primary_hue` using the blue color palette. You can set any CSS variable that is defined in the theme in this manner. \n\nYour IDE type hinting should help you navigate these variables. Since there are so many CSS variables, let's take a look at how these variables are named and organized.\n\n### CSS Variable Naming Conventions\n\nCSS variable names can get quite long, like `button_primary_background_fill_hover_dark`! However, they follow a common naming convention that makes it easy to understand what they do and to find the variable you're looking for. Separated by underscores, the variable name is made up of:\n\n1. The target element, such as `button`, `slider`, or `block`.\n2. The target element type or sub-element, such as `button_primary`, or `block_label`.\n3. The property, such as `button_primary_background_fill`, or `block_label_border_width`.\n4. Any relevant state, such as `button_primary_background_fill_hover`.\n5. If the value is different in dark mode, the suffix `_dark`. For example, `input_border_color_focus_dark`.\n\nOf course, many CSS variable names are shorter than this, such as `table_border_color`, or `input_shadow`. \n\n### CSS Variable Organization\n\nThough there are hundreds of CSS variables, they do not all have to have individual values. They draw their values by referencing a set of core variables and referencing each other. This allows us to only have to modify a few variables to change the look and feel of the entire theme, while also getting finer control of individual elements that we may want to modify.\n\n#### Referencing Core Variables\n\nTo reference one of the core constructor variables, precede the variable name with an asterisk. To reference a core color, use the `*primary_`, `*secondary_`, or `*neutral_` prefix, followed by the brightness value. For example:\n\n
theme = gr.themes.Default(primary_hue=\"blue\").set(\n    button_primary_background_fill=\"*primary_200\",\n    button_primary_background_fill_hover=\"*primary_300\",\n)\n
\n\nIn the example above, we've set the `button_primary_background_fill` and `button_primary_background_fill_hover` variables to `*primary_200` and `*primary_300`. These variables will be set to the 200 and 300 brightness values of the blue primary color palette, respectively.\n\nSimilarly, to reference a core size, use the `*spacing_`, `*radius_`, or `*text_` prefix, followed by the size value. For example:\n\n
theme = gr.themes.Default(radius_size=\"md\").set(\n    button_primary_border_radius=\"*radius_xl\",\n)\n
\n\nIn the example above, we've set the `button_primary_border_radius` variable to `*radius_xl`. This variable will be set to the `xl` setting of the medium radius size range.\n\n#### Referencing Other Variables\n\nVariables can also reference each other. For example, look at the example below:\n\n
theme = gr.themes.Default().set(\n    button_primary_background_fill=\"#FF0000\",\n    button_primary_background_fill_hover=\"#FF0000\",\n    button_primary_border=\"#FF0000\",\n)\n
\n\nHaving to set these values to a common color is a bit tedious. Instead, we can reference the `button_primary_background_fill` variable in the `button_primary_background_fill_hover` and `button_primary_border` variables, using a `*` prefix. \n\n
theme = gr.themes.Default().set(\n    button_primary_background_fill=\"#FF0000\",\n    button_primary_background_fill_hover=\"*button_primary_background_fill\",\n    button_primary_border=\"*button_primary_background_fill\",\n)\n
\n\nNow, if we change the `button_primary_background_fill` variable, the `button_primary_background_fill_hover` and `button_primary_border` variables will automatically update as well.\n\nThis is particularly useful if you intend to share your theme - it makes it easy to modify the theme without having to change every variable.\n\nNote that dark mode variables automatically reference each other. For example:\n\n
theme = gr.themes.Default().set(\n    button_primary_background_fill=\"#FF0000\",\n    button_primary_background_fill_dark=\"#AAAAAA\",\n    button_primary_border=\"*button_primary_background_fill\",\n    button_primary_border_dark=\"*button_primary_background_fill_dark\",\n)\n
\n\n`button_primary_border_dark` will draw its value from `button_primary_background_fill_dark`, because dark mode always draws from the dark version of the variable.\n\n## Creating a Full Theme\n\nLet's say you want to create a theme from scratch! We'll go through it step by step - you can also see the source of prebuilt themes in the gradio source repo for reference - [here's the source](https://github.com/gradio-app/gradio/blob/main/gradio/themes/monochrome.py) for the Monochrome theme.\n\nOur new theme class will inherit from `gradio.themes.Base`, a theme that sets a lot of convenient defaults. Let's make a simple demo that creates a dummy theme called Seafoam, and make a simple app that uses it.\n\n
import gradio as gr\nfrom gradio.themes.base import Base\nimport time\n\nclass Seafoam(Base):\n    pass\n\nseafoam = Seafoam()\n\nwith gr.Blocks(theme=seafoam) as demo:\n    textbox = gr.Textbox(label=\"Name\")\n    slider = gr.Slider(label=\"Count\", minimum=0, maximum=100, step=1)\n    with gr.Row():\n        button = gr.Button(\"Submit\", variant=\"primary\")\n        clear = gr.Button(\"Clear\")\n    output = gr.Textbox(label=\"Output\")\n\n    def repeat(name, count):\n        time.sleep(3)\n        return name * count\n\n    button.click(repeat, [textbox, slider], output)\n\ndemo.launch()\n
\n\n
\n\n
\n\n

The Base theme is very barebones, and uses gr.themes.Blue as its primary color - you'll note the primary button and the loading animation are both blue as a result. Let's change the default core arguments of our app. We'll overwrite the constructor and pass new defaults for the core constructor arguments.

\n\n

We'll use gr.themes.Emerald as our primary color, and set secondary and neutral hues to gr.themes.Blue. We'll make our text larger using text_lg. We'll use Quicksand as our default font, loaded from Google Fonts.

\n\n
from __future__ import annotations\nfrom typing import Iterable\nimport gradio as gr\nfrom gradio.themes.base import Base\nfrom gradio.themes.utils import colors, fonts, sizes\nimport time\n\n\nclass Seafoam(Base):\n    def __init__(\n        self,\n        *,\n        primary_hue: colors.Color | str = colors.emerald,\n        secondary_hue: colors.Color | str = colors.blue,\n        neutral_hue: colors.Color | str = colors.gray,\n        spacing_size: sizes.Size | str = sizes.spacing_md,\n        radius_size: sizes.Size | str = sizes.radius_md,\n        text_size: sizes.Size | str = sizes.text_lg,\n        font: fonts.Font\n        | str\n        | Iterable[fonts.Font | str] = (\n            fonts.GoogleFont(\"Quicksand\"),\n            \"ui-sans-serif\",\n            \"sans-serif\",\n        ),\n        font_mono: fonts.Font\n        | str\n        | Iterable[fonts.Font | str] = (\n            fonts.GoogleFont(\"IBM Plex Mono\"),\n            \"ui-monospace\",\n            \"monospace\",\n        ),\n    ):\n        super().__init__(\n            primary_hue=primary_hue,\n            secondary_hue=secondary_hue,\n            neutral_hue=neutral_hue,\n            spacing_size=spacing_size,\n            radius_size=radius_size,\n            text_size=text_size,\n            font=font,\n            font_mono=font_mono,\n        )\n\n\nseafoam = Seafoam()\n\nwith gr.Blocks(theme=seafoam) as demo:\n    textbox = gr.Textbox(label=\"Name\")\n    slider = gr.Slider(label=\"Count\", minimum=0, maximum=100, step=1)\n    with gr.Row():\n        button = gr.Button(\"Submit\", variant=\"primary\")\n        clear = gr.Button(\"Clear\")\n    output = gr.Textbox(label=\"Output\")\n\n    def repeat(name, count):\n        time.sleep(3)\n        return name * count\n\n    button.click(repeat, [textbox, slider], output)\n\ndemo.launch()\n\n
\n\n
\n\n
\n\n

See how the primary button and the loading animation are now green? These CSS variables are tied to the primary_hue variable.

\n\n

Let's modify the theme a bit more directly. We'll call the set() method to overwrite CSS variable values explicitly. We can use any CSS logic, and reference our core constructor arguments using the * prefix.

\n\n
from __future__ import annotations\nfrom typing import Iterable\nimport gradio as gr\nfrom gradio.themes.base import Base\nfrom gradio.themes.utils import colors, fonts, sizes\nimport time\n\n\nclass Seafoam(Base):\n    def __init__(\n        self,\n        *,\n        primary_hue: colors.Color | str = colors.emerald,\n        secondary_hue: colors.Color | str = colors.blue,\n        neutral_hue: colors.Color | str = colors.blue,\n        spacing_size: sizes.Size | str = sizes.spacing_md,\n        radius_size: sizes.Size | str = sizes.radius_md,\n        text_size: sizes.Size | str = sizes.text_lg,\n        font: fonts.Font\n        | str\n        | Iterable[fonts.Font | str] = (\n            fonts.GoogleFont(\"Quicksand\"),\n            \"ui-sans-serif\",\n            \"sans-serif\",\n        ),\n        font_mono: fonts.Font\n        | str\n        | Iterable[fonts.Font | str] = (\n            fonts.GoogleFont(\"IBM Plex Mono\"),\n            \"ui-monospace\",\n            \"monospace\",\n        ),\n    ):\n        super().__init__(\n            primary_hue=primary_hue,\n            secondary_hue=secondary_hue,\n            neutral_hue=neutral_hue,\n            spacing_size=spacing_size,\n            radius_size=radius_size,\n            text_size=text_size,\n            font=font,\n            font_mono=font_mono,\n        )\n        super().set(\n            body_background_fill=\"repeating-linear-gradient(45deg, *primary_200, *primary_200 10px, *primary_50 10px, *primary_50 20px)\",\n            body_background_fill_dark=\"repeating-linear-gradient(45deg, *primary_800, *primary_800 10px, *primary_900 10px, *primary_900 20px)\",\n            button_primary_background_fill=\"linear-gradient(90deg, *primary_300, *secondary_400)\",\n            button_primary_background_fill_hover=\"linear-gradient(90deg, *primary_200, *secondary_300)\",\n            button_primary_text_color=\"white\",\n            button_primary_background_fill_dark=\"linear-gradient(90deg, *primary_600, *secondary_800)\",\n            slider_color=\"*secondary_300\",\n            slider_color_dark=\"*secondary_600\",\n            block_title_text_weight=\"600\",\n            block_border_width=\"3px\",\n            block_shadow=\"*shadow_drop_lg\",\n            button_shadow=\"*shadow_drop_lg\",\n            button_large_padding=\"32px\",\n        )\n\n\nseafoam = Seafoam()\n\nwith gr.Blocks(theme=seafoam) as demo:\n    textbox = gr.Textbox(label=\"Name\")\n    slider = gr.Slider(label=\"Count\", minimum=0, maximum=100, step=1)\n    with gr.Row():\n        button = gr.Button(\"Submit\", variant=\"primary\")\n        clear = gr.Button(\"Clear\")\n    output = gr.Textbox(label=\"Output\")\n\n    def repeat(name, count):\n        time.sleep(3)\n        return name * count\n\n    button.click(repeat, [textbox, slider], output)\n\ndemo.launch()\n\n
\n
\n\n
\n\n

Look how fun our theme looks now! With just a few variable changes, our theme looks completely different.

\n\n

You may find it helpful to explore the source code of the other prebuilt themes to see how they modified the base theme. You can also find your browser's Inspector useful to select elements from the UI and see what CSS variables are being used in the styles panel.

\n\n

Sharing Themes

\n\n

Once you have created a theme, you can upload it to the HuggingFace Hub to let others view it, use it, and build off of it!

\n\n

Uploading a Theme

\n\n

There are two ways to upload a theme, via the theme class instance or the command line. We will cover both of them with the previously created seafoam theme.

\n\n
    \n
  • Via the class instance
  • \n
\n\n

Each theme instance has a method called push_to_hub we can use to upload a theme to the HuggingFace hub.

\n\n
seafoam.push_to_hub(repo_name=\"seafoam\",\n                    version=\"0.0.1\",\n                    hf_token=\"\")\n
\n\n* Via the command line\n\nFirst save the theme to disk\n
seafoam.dump(filename=\"seafoam.json\")\n
\n\nThen use the `upload_theme` command:\n\n
upload_theme\\\n\"seafoam.json\"\\\n\"seafoam\"\\\n--version \"0.0.1\"\\\n--hf_token \"\"\n
\n\nIn order to upload a theme, you must have a HuggingFace account and pass your [Access Token](https://huggingface.co/docs/huggingface_hub/quick-start#login)\nas the `hf_token` argument. However, if you log in via the [HuggingFace command line](https://huggingface.co/docs/huggingface_hub/quick-start#login) (which comes installed with `gradio`),\nyou can omit the `hf_token` argument.\n\nThe `version` argument lets you specify a valid [semantic version](https://www.geeksforgeeks.org/introduction-semantic-versioning/) string for your theme.\nThat way your users are able to specify which version of your theme they want to use in their apps. This also lets you publish updates to your theme without worrying\nabout changing how previously created apps look. The `version` argument is optional. If omitted, the next patch version is automatically applied.\n\n### Theme Previews\n\nBy calling `push_to_hub` or `upload_theme`, the theme assets will be stored in a [HuggingFace space](https://huggingface.co/docs/hub/spaces-overview).\n\nThe theme preview for our seafoam theme is here: [seafoam preview](https://huggingface.co/spaces/gradio/seafoam).\n\n
\n\n
\n\n

Discovering Themes

\n\n

The Theme Gallery shows all the public gradio themes. After publishing your theme,\nit will automatically show up in the theme gallery after a couple of minutes.

\n\n

You can sort the themes by the number of likes on the space or by how recently they were created, and you can also toggle each theme between light and dark mode.

\n\n
\n\n
\n\n

Downloading

\n\n

To use a theme from the hub, use the from_hub method on the ThemeClass and pass it to your app:

\n\n
my_theme = gr.Theme.from_hub(\"gradio/seafoam\")\n\nwith gr.Blocks(theme=my_theme) as demo:\n    ....\n
\n\n

You can also pass the theme string directly to Blocks or Interface (gr.Blocks(theme=\"gradio/seafoam\"))

\n\n

You can pin your app to an upstream theme version by using semantic versioning expressions.

\n\n

For example, the following would ensure the theme we load from the seafoam repo was between versions 0.0.1 and 0.1.0:

\n\n
with gr.Blocks(theme=\"gradio/seafoam@>=0.0.1,<0.1.0\") as demo:\n    ....\n
\n\n

Enjoy creating your own themes! If you make one you're proud of, please share it with the world by uploading it to the hub! \nIf you tag us on Twitter we can give your theme a shout out!

\n\n

\n", "tags": ["THEMES"], "spaces": [], "url": "/guides/theming-guide/", "contributor": null}, {"name": "using-flagging", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 45, "pretty_name": "Using Flagging", "content": "# Using Flagging\n\n\n\n\n## Introduction\n\nWhen you demo a machine learning model, you might want to collect data from users who try the model, particularly data points in which the model is not behaving as expected. Capturing these \"hard\" data points is valuable because it allows you to improve your machine learning model and make it more reliable and robust.\n\nGradio simplifies the collection of this data by including a **Flag** button with every `Interface`. This allows a user or tester to easily send data back to the machine where the demo is running. In this Guide, we discuss more about how to use the flagging feature, both with `gradio.Interface` as well as with `gradio.Blocks`.\n\n## The **Flag** button in `gradio.Interface`\n\nFlagging with Gradio's `Interface` is especially easy. By default, underneath the output components, there is a button marked **Flag**. When a user testing your model sees input with interesting output, they can click the flag button to send the input and output data back to the machine where the demo is running. The sample is saved to a CSV log file (by default). If the demo involves images, audio, video, or other types of files, these are saved separately in a parallel directory and the paths to these files are saved in the CSV file.\n\nThere are [four parameters](https://gradio.app/docs/#interface-header) in `gradio.Interface` that control how flagging works. We will go over them in greater detail.\n\n* `allow_flagging`: this parameter can be set to either `\"manual\"` (default), `\"auto\"`, or `\"never\"`. \n * `manual`: users will see a button to flag, and samples are only flagged when the button is clicked.\n * `auto`: users will not see a button to flag, but every sample will be flagged automatically. \n * `never`: users will not see a button to flag, and no sample will be flagged. \n* `flagging_options`: this parameter can be either `None` (default) or a list of strings.\n * If `None`, then the user simply clicks on the **Flag** button and no additional options are shown.\n * If a list of strings are provided, then the user sees several buttons, corresponding to each of the strings that are provided. For example, if the value of this parameter is `[\"Incorrect\", \"Ambiguous\"]`, then buttons labeled **Flag as Incorrect** and **Flag as Ambiguous** appear. This only applies if `allow_flagging` is `\"manual\"`.\n * The chosen option is then logged along with the input and output.\n* `flagging_dir`: this parameter takes a string.\n * It represents what to name the directory where flagged data is stored.\n* `flagging_callback`: this parameter takes an instance of a subclass of the `FlaggingCallback` class\n * Using this parameter allows you to write custom code that gets run when the flag button is clicked\n * By default, this is set to an instance of `gr.CSVLogger`\n * One example is setting it to an instance of `gr.HuggingFaceDatasetSaver` which can allow you to pipe any flagged data into a HuggingFace Dataset. (See more below.)\n\n## What happens to flagged data?\n\nWithin the directory provided by the `flagging_dir` argument, a CSV file will log the flagged data. 
\n\nHere's an example: The code below creates the calculator interface embedded below it:\n\n```python\nimport gradio as gr\n\n\ndef calculator(num1, operation, num2):\n if operation == \"add\":\n return num1 + num2\n elif operation == \"subtract\":\n return num1 - num2\n elif operation == \"multiply\":\n return num1 * num2\n elif operation == \"divide\":\n return num1 / num2\n\n\niface = gr.Interface(\n calculator,\n [\"number\", gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]), \"number\"],\n \"number\",\n allow_flagging=\"manual\"\n)\n\niface.launch()\n```\n\n\n\nWhen you click the flag button above, the directory where the interface was launched will include a new flagged subfolder, with a csv file inside it. This csv file includes all the data that was flagged. \n\n```directory\n+-- flagged/\n| +-- logs.csv\n```\n_flagged/logs.csv_\n```csv\nnum1,operation,num2,Output,timestamp\n5,add,7,12,2022-01-31 11:40:51.093412\n6,subtract,1.5,4.5,2022-01-31 03:25:32.023542\n```\n\nIf the interface involves file data, such as for Image and Audio components, folders will be created to store those flagged data as well. For example an `image` input to `image` output interface will create the following structure.\n\n```directory\n+-- flagged/\n| +-- logs.csv\n| +-- image/\n| | +-- 0.png\n| | +-- 1.png\n| +-- Output/\n| | +-- 0.png\n| | +-- 1.png\n```\n_flagged/logs.csv_\n```csv\nim,Output timestamp\nim/0.png,Output/0.png,2022-02-04 19:49:58.026963\nim/1.png,Output/1.png,2022-02-02 10:40:51.093412\n```\n\nIf you wish for the user to provide a reason for flagging, you can pass a list of strings to the `flagging_options` argument of Interface. Users will have to select one of these choices when flagging, and the option will be saved as an additional column to the CSV.\n\nIf we go back to the calculator example, the following code will create the interface embedded below it. \n```python\niface = gr.Interface(\n calculator,\n [\"number\", gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]), \"number\"],\n \"number\",\n allow_flagging=\"manual\",\n flagging_options=[\"wrong sign\", \"off by one\", \"other\"]\n)\n\niface.launch()\n```\n\n\nWhen users click the flag button, the csv file will now include a column indicating the selected option.\n\n_flagged/logs.csv_\n```csv\nnum1,operation,num2,Output,flag,timestamp\n5,add,7,-12,wrong sign,2022-02-04 11:40:51.093412\n6,subtract,1.5,3.5,off by one,2022-02-04 11:42:32.062512\n```\n\n## The HuggingFaceDatasetSaver Callback\n\nSometimes, saving the data to a local CSV file doesn't make sense. For example, on Hugging Face\nSpaces, developers typically don't have access to the underlying ephemeral machine hosting the Gradio\ndemo. That's why, by default, flagging is turned off in Hugging Face Space. However,\nyou may want to do something else with the flagged data.\n\nWe've made this super easy with the `flagging_callback` parameter.\n\nFor example, below we're going to pipe flagged data from our calculator example into a Hugging Face Dataset, e.g. 
so that we can build a \"crowd-sourced\" dataset:\n\n\n```python\nimport os\n\nHF_TOKEN = os.getenv('HF_TOKEN')\nhf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, \"crowdsourced-calculator-demo\")\n\niface = gr.Interface(\n calculator,\n [\"number\", gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]), \"number\"],\n \"number\",\n description=\"Check out the crowd-sourced dataset at: [https://huggingface.co/datasets/aliabd/crowdsourced-calculator-demo](https://huggingface.co/datasets/aliabd/crowdsourced-calculator-demo)\",\n allow_flagging=\"manual\",\n flagging_options=[\"wrong sign\", \"off by one\", \"other\"],\n flagging_callback=hf_writer\n)\n\niface.launch()\n```\n\nNotice that we define our own \ninstance of `gradio.HuggingFaceDatasetSaver` using our Hugging Face token and\nthe name of a dataset we'd like to save samples to. In addition, we also set `allow_flagging=\"manual\"`\nbecause on Hugging Face Spaces, `allow_flagging` is set to `\"never\"` by default. Here's our demo:\n\n\n\nYou can now see all the examples flagged above in this [public Hugging Face dataset](https://huggingface.co/datasets/aliabd/crowdsourced-calculator-demo).\n\n![flagging callback hf](https://github.com/gradio-app/gradio/blob/main/guides/assets/flagging-callback-hf.png?raw=true)\n\nWe created the `gradio.HuggingFaceDatasetSaver` class, but you can pass your own custom class as long as it inherits from `FLaggingCallback` defined in [this file](https://github.com/gradio-app/gradio/blob/master/gradio/flagging.py). If you create a cool callback, contribute it to the repo! \n\n## Flagging with Blocks\n\nWhat about if you are using `gradio.Blocks`? On one hand, you have even more flexibility\nwith Blocks -- you can write whatever Python code you want to run when a button is clicked,\nand assign that using the built-in events in Blocks.\n\nAt the same time, you might want to use an existing `FlaggingCallback` to avoid writing extra code.\nThis requires two steps:\n\n1. You have to run your callback's `.setup()` somewhere in the code prior to the \nfirst time you flag data\n2. When the flagging button is clicked, then you trigger the callback's `.flag()` method,\nmaking sure to collect the arguments correctly and disabling the typical preprocessing. 
\n\nHere is an example with an image sepia filter Blocks demo that lets you flag\ndata using the default `CSVLogger`:\n\n```python\nimport numpy as np\nimport gradio as gr\n\ndef sepia(input_img, strength):\n sepia_filter = strength * np.array(\n [[0.393, 0.769, 0.189], [0.349, 0.686, 0.168], [0.272, 0.534, 0.131]]\n ) + (1-strength) * np.identity(3)\n sepia_img = input_img.dot(sepia_filter.T)\n sepia_img /= sepia_img.max()\n return sepia_img\n\ncallback = gr.CSVLogger()\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n img_input = gr.Image()\n strength = gr.Slider(0, 1, 0.5)\n img_output = gr.Image()\n with gr.Row():\n btn = gr.Button(\"Flag\")\n \n # This needs to be called at some point prior to the first call to callback.flag()\n callback.setup([img_input, strength, img_output], \"flagged_data_points\")\n\n img_input.change(sepia, [img_input, strength], img_output)\n strength.change(sepia, [img_input, strength], img_output)\n \n # We can choose which components to flag -- in this case, we'll flag all of them\n btn.click(lambda *args: callback.flag(args), [img_input, strength, img_output], None, preprocess=False)\n\ndemo.launch()\n\n```\n\n\n## Privacy\n\nImportant Note: please make sure your users understand when the data they submit is being saved, and what you plan on doing with it. This is especially important when you use `allow_flagging=auto` (when all of the data submitted through the demo is being flagged)\n\n### That's all! Happy building :) \n", "html": "

Using Flagging

\n\n

Introduction

\n\n

When you demo a machine learning model, you might want to collect data from users who try the model, particularly data points in which the model is not behaving as expected. Capturing these \"hard\" data points is valuable because it allows you to improve your machine learning model and make it more reliable and robust.

\n\n

Gradio simplifies the collection of this data by including a Flag button with every Interface. This allows a user or tester to easily send data back to the machine where the demo is running. In this Guide, we discuss more about how to use the flagging feature, both with gradio.Interface as well as with gradio.Blocks.

\n\n

The Flag button in gradio.Interface

\n\n

Flagging with Gradio's Interface is especially easy. By default, underneath the output components, there is a button marked Flag. When a user testing your model sees input with interesting output, they can click the flag button to send the input and output data back to the machine where the demo is running. The sample is saved to a CSV log file (by default). If the demo involves images, audio, video, or other types of files, these are saved separately in a parallel directory and the paths to these files are saved in the CSV file.

\n\n

There are four parameters in gradio.Interface that control how flagging works. We will go over them in greater detail.

\n\n
    \n
  • allow_flagging: this parameter can be set to either \"manual\" (default), \"auto\", or \"never\".
    \n
      \n
    • manual: users will see a button to flag, and samples are only flagged when the button is clicked.
    • \n
    • auto: users will not see a button to flag, but every sample will be flagged automatically.
    • \n
    • never: users will not see a button to flag, and no sample will be flagged.
    • \n
  • \n
  • flagging_options: this parameter can be either None (default) or a list of strings.\n
      \n
    • If None, then the user simply clicks on the Flag button and no additional options are shown.
    • \n
    • If a list of strings are provided, then the user sees several buttons, corresponding to each of the strings that are provided. For example, if the value of this parameter is [\"Incorrect\", \"Ambiguous\"], then buttons labeled Flag as Incorrect and Flag as Ambiguous appear. This only applies if allow_flagging is \"manual\".
    • \n
    • The chosen option is then logged along with the input and output.
    • \n
  • \n
  • flagging_dir: this parameter takes a string.\n
      \n
    • It represents what to name the directory where flagged data is stored.
    • \n
  • \n
  • flagging_callback: this parameter takes an instance of a subclass of the FlaggingCallback class\n
      \n
    • Using this parameter allows you to write custom code that gets run when the flag button is clicked
    • \n
    • By default, this is set to an instance of gr.CSVLogger
    • \n
    • One example is setting it to an instance of gr.HuggingFaceDatasetSaver which can allow you to pipe any flagged data into a HuggingFace Dataset. (See more below.)
    • \n
  • \n
\n\n

What happens to flagged data?

\n\n

Within the directory provided by the flagging_dir argument, a CSV file will log the flagged data.

\n\n

Here's an example: The code below creates the calculator interface embedded below it:

\n\n
import gradio as gr\n\n\ndef calculator(num1, operation, num2):\n    if operation == \"add\":\n        return num1 + num2\n    elif operation == \"subtract\":\n        return num1 - num2\n    elif operation == \"multiply\":\n        return num1 * num2\n    elif operation == \"divide\":\n        return num1 / num2\n\n\niface = gr.Interface(\n    calculator,\n    [\"number\", gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]), \"number\"],\n    \"number\",\n    allow_flagging=\"manual\"\n)\n\niface.launch()\n
\n\n

\n\n

When you click the flag button above, the directory where the interface was launched will include a new flagged subfolder, with a csv file inside it. This csv file includes all the data that was flagged.

\n\n
+-- flagged/\n|   +-- logs.csv\n
\n\n

flagged/logs.csv

\n\n
num1,operation,num2,Output,timestamp\n5,add,7,12,2022-01-31 11:40:51.093412\n6,subtract,1.5,4.5,2022-01-31 03:25:32.023542\n
\n\n

If the interface involves file data, such as for Image and Audio components, folders will be created to store that flagged data as well. For example, an image input to image output interface will create the following structure.

\n\n
+-- flagged/\n|   +-- logs.csv\n|   +-- image/\n|   |   +-- 0.png\n|   |   +-- 1.png\n|   +-- Output/\n|   |   +-- 0.png\n|   |   +-- 1.png\n
\n\n

flagged/logs.csv

\n\n
im,Output,timestamp\nim/0.png,Output/0.png,2022-02-04 19:49:58.026963\nim/1.png,Output/1.png,2022-02-02 10:40:51.093412\n
\n\n

If you wish for the user to provide a reason for flagging, you can pass a list of strings to the flagging_options argument of Interface. Users will have to select one of these choices when flagging, and the option will be saved as an additional column to the CSV.

\n\n

If we go back to the calculator example, the following code will create the interface embedded below it.

\n\n
iface = gr.Interface(\n    calculator,\n    [\"number\", gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]), \"number\"],\n    \"number\",\n    allow_flagging=\"manual\",\n    flagging_options=[\"wrong sign\", \"off by one\", \"other\"]\n)\n\niface.launch()\n
\n\n

\n\n

When users click the flag button, the csv file will now include a column indicating the selected option.

\n\n

flagged/logs.csv

\n\n
num1,operation,num2,Output,flag,timestamp\n5,add,7,-12,wrong sign,2022-02-04 11:40:51.093412\n6,subtract,1.5,3.5,off by one,2022-02-04 11:42:32.062512\n
\n\n

The HuggingFaceDatasetSaver Callback

\n\n

Sometimes, saving the data to a local CSV file doesn't make sense. For example, on Hugging Face\nSpaces, developers typically don't have access to the underlying ephemeral machine hosting the Gradio\ndemo. That's why, by default, flagging is turned off in Hugging Face Spaces. However,\nyou may want to do something else with the flagged data.

\n\n

We've made this super easy with the flagging_callback parameter.

\n\n

For example, below we're going to pipe flagged data from our calculator example into a Hugging Face Dataset, e.g. so that we can build a \"crowd-sourced\" dataset:

\n\n
import os\n\nHF_TOKEN = os.getenv('HF_TOKEN')\nhf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, \"crowdsourced-calculator-demo\")\n\niface = gr.Interface(\n    calculator,\n    [\"number\", gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]), \"number\"],\n    \"number\",\n    description=\"Check out the crowd-sourced dataset at: [https://huggingface.co/datasets/aliabd/crowdsourced-calculator-demo](https://huggingface.co/datasets/aliabd/crowdsourced-calculator-demo)\",\n    allow_flagging=\"manual\",\n    flagging_options=[\"wrong sign\", \"off by one\", \"other\"],\n    flagging_callback=hf_writer\n)\n\niface.launch()\n
\n\n

Notice that we define our own \ninstance of gradio.HuggingFaceDatasetSaver using our Hugging Face token and\nthe name of a dataset we'd like to save samples to. In addition, we also set allow_flagging=\"manual\"\nbecause on Hugging Face Spaces, allow_flagging is set to \"never\" by default. Here's our demo:

\n\n

\n\n

You can now see all the examples flagged above in this public Hugging Face dataset.

\n\n

\"flagging

\n\n

We created the gradio.HuggingFaceDatasetSaver class, but you can pass your own custom class as long as it inherits from FlaggingCallback defined in this file. If you create a cool callback, contribute it to the repo!

\n\n

Flagging with Blocks

\n\n

What if you are using gradio.Blocks? On one hand, you have even more flexibility\nwith Blocks -- you can write whatever Python code you want to run when a button is clicked,\nand assign that using the built-in events in Blocks.

\n\n

At the same time, you might want to use an existing FlaggingCallback to avoid writing extra code.\nThis requires two steps:

\n\n
    \n
  1. You have to run your callback's .setup() somewhere in the code prior to the \nfirst time you flag data
  2. \n
  3. When the flagging button is clicked, then you trigger the callback's .flag() method,\nmaking sure to collect the arguments correctly and disabling the typical preprocessing.
  4. \n
\n\n

Here is an example with an image sepia filter Blocks demo that lets you flag\ndata using the default CSVLogger:

\n\n
import numpy as np\nimport gradio as gr\n\ndef sepia(input_img, strength):\n    sepia_filter = strength * np.array(\n        [[0.393, 0.769, 0.189], [0.349, 0.686, 0.168], [0.272, 0.534, 0.131]]\n    ) + (1-strength) * np.identity(3)\n    sepia_img = input_img.dot(sepia_filter.T)\n    sepia_img /= sepia_img.max()\n    return sepia_img\n\ncallback = gr.CSVLogger()\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        with gr.Column():\n            img_input = gr.Image()\n            strength = gr.Slider(0, 1, 0.5)\n        img_output = gr.Image()\n    with gr.Row():\n        btn = gr.Button(\"Flag\")\n\n    # This needs to be called at some point prior to the first call to callback.flag()\n    callback.setup([img_input, strength, img_output], \"flagged_data_points\")\n\n    img_input.change(sepia, [img_input, strength], img_output)\n    strength.change(sepia, [img_input, strength], img_output)\n\n    # We can choose which components to flag -- in this case, we'll flag all of them\n    btn.click(lambda *args: callback.flag(args), [img_input, strength, img_output], None, preprocess=False)\n\ndemo.launch()\n\n
\n\n

\n\n

Privacy

\n\n

Important Note: please make sure your users understand when the data they submit is being saved, and what you plan on doing with it. This is especially important when you use allow_flagging=\"auto\" (when all of the data submitted through the demo is being flagged).

\n\n

That's all! Happy building :)

\n", "tags": ["FLAGGING", "DATA"], "spaces": ["https://huggingface.co/spaces/gradio/calculator-flagging-crowdsourced", "https://huggingface.co/spaces/gradio/calculator-flagging-options", "https://huggingface.co/spaces/gradio/calculator-flag-basic"], "url": "/guides/using-flagging/", "contributor": null}]}]} +{"guides_by_category": [{"category": "Getting Started", "guides": [{"name": "quickstart", "category": "getting-started", "pretty_category": "Getting Started", "guide_index": 1, "absolute_index": 0, "pretty_name": "Quickstart", "content": "# Quickstart\n\n**Prerequisite**: Gradio requires Python 3.8 or higher, that's all!\n\n## What Does Gradio Do?\n\nOne of the *best ways to share* your machine learning model, API, or data science workflow with others is to create an **interactive app** that allows your users or colleagues to try out the demo in their browsers.\n\nGradio allows you to **build demos and share them, all in Python.** And usually in just a few lines of code! So let's get started.\n\n## Hello, World\n\nTo get Gradio running with a simple \"Hello, World\" example, follow these three steps:\n\n1\\. Install Gradio using pip:\n\n```bash\npip install gradio\n```\n\n2\\. Run the code below as a Python script or in a Jupyter Notebook (or [Google Colab](https://colab.research.google.com/drive/18ODkJvyxHutTN0P5APWyGFO_xwNcgHDZ?usp=sharing)):\n\n```python\nimport gradio as gr\n\ndef greet(name):\n return \"Hello \" + name + \"!\"\n\ndemo = gr.Interface(fn=greet, inputs=\"text\", outputs=\"text\")\n \ndemo.launch() \n```\n\nWe shorten the imported name to `gr` for better readability of code using Gradio. This is a widely adopted convention that you should follow so that anyone working with your code can easily understand it.\n\n3\\. The demo below will appear automatically within the Jupyter Notebook, or pop in a browser on [http://localhost:7860](http://localhost:7860) if running from a script:\n\n\n\nWhen developing locally, if you want to run the code as a Python script, you can use the Gradio CLI to launch the application **in reload mode**, which will provide seamless and fast development. Learn more about reloading in the [Auto-Reloading Guide](https://gradio.app/developing-faster-with-reload-mode/).\n\n```bash\ngradio app.py\n```\n\nNote: you can also do `python app.py`, but it won't provide the automatic reload mechanism.\n\n## The `Interface` Class\n\nYou'll notice that in order to make the demo, we created a `gr.Interface`. This `Interface` class can wrap any Python function with a user interface. In the example above, we saw a simple text-based function, but the function could be anything from music generator to a tax calculator to the prediction function of a pretrained machine learning model.\n\nThe core `Interface` class is initialized with three required parameters:\n\n- `fn`: the function to wrap a UI around\n- `inputs`: which component(s) to use for the input (e.g. `\"text\"`, `\"image\"` or `\"audio\"`)\n- `outputs`: which component(s) to use for the output (e.g. `\"text\"`, `\"image\"` or `\"label\"`)\n\nLet's take a closer look at these components used to provide input and output.\n\n## Components Attributes\n\nWe saw some simple `Textbox` components in the previous examples, but what if you want to change how the UI components look or behave?\n\nLet's say you want to customize the input text field \u2014 for example, you wanted it to be larger and have a text placeholder. 
If we use the actual class for `Textbox` instead of using the string shortcut, you have access to much more customizability through component attributes.\n\n```python\nimport gradio as gr\n\ndef greet(name):\n return \"Hello \" + name + \"!\"\n\ndemo = gr.Interface(\n fn=greet,\n inputs=gr.Textbox(lines=2, placeholder=\"Name Here...\"),\n outputs=\"text\",\n)\ndemo.launch()\n\n```\n\n\n## Multiple Input and Output Components\n\nSuppose you had a more complex function, with multiple inputs and outputs. In the example below, we define a function that takes a string, boolean, and number, and returns a string and number. Take a look how you pass a list of input and output components.\n\n```python\nimport gradio as gr\n\ndef greet(name, is_morning, temperature):\n salutation = \"Good morning\" if is_morning else \"Good evening\"\n greeting = f\"{salutation} {name}. It is {temperature} degrees today\"\n celsius = (temperature - 32) * 5 / 9\n return greeting, round(celsius, 2)\n\ndemo = gr.Interface(\n fn=greet,\n inputs=[\"text\", \"checkbox\", gr.Slider(0, 100)],\n outputs=[\"text\", \"number\"],\n)\ndemo.launch()\n\n```\n\n\nYou simply wrap the components in a list. Each component in the `inputs` list corresponds to one of the parameters of the function, in order. Each component in the `outputs` list corresponds to one of the values returned by the function, again in order.\n\n## An Image Example\n\nGradio supports many types of components, such as `Image`, `DataFrame`, `Video`, or `Label`. Let's try an image-to-image function to get a feel for these!\n\n```python\nimport numpy as np\nimport gradio as gr\n\ndef sepia(input_img):\n sepia_filter = np.array([\n [0.393, 0.769, 0.189], \n [0.349, 0.686, 0.168], \n [0.272, 0.534, 0.131]\n ])\n sepia_img = input_img.dot(sepia_filter.T)\n sepia_img /= sepia_img.max()\n return sepia_img\n\ndemo = gr.Interface(sepia, gr.Image(shape=(200, 200)), \"image\")\ndemo.launch()\n\n```\n\n\nWhen using the `Image` component as input, your function will receive a NumPy array with the shape `(height, width, 3)`, where the last dimension represents the RGB values. We'll return an image as well in the form of a NumPy array.\n\nYou can also set the datatype used by the component with the `type=` keyword argument. For example, if you wanted your function to take a file path to an image instead of a NumPy array, the input `Image` component could be written as:\n\n```python\ngr.Image(type=\"filepath\", shape=...)\n```\n\nAlso note that our input `Image` component comes with an edit button \ud83d\udd89, which allows for cropping and zooming into images. Manipulating images in this way can help reveal biases or hidden flaws in a machine learning model!\n\nYou can read more about the many components and how to use them in the [Gradio docs](https://gradio.app/docs).\n\n## Chatbots\n\nGradio includes a high-level class, `gr.ChatInterface`, which is similar to `gr.Interface`, but is specifically designed for chatbot UIs. The `gr.ChatInterface` class also wraps a function but this function must have a specific signature. The function should take two arguments: `message` and then `history` (the arguments can be named anything, but must be in this order)\n\n* `message`: a `str` representing the user's input\n* `history`: a `list` of `list` representing the conversations up until that point. Each inner list consists of two `str` representing a pair: `[user input, bot response]`. 
\n\nYour function should return a single string response, which is the bot's response to the particular user input `message`.\n\nOther than that, `gr.ChatInterface` has no required parameters (though several are available for customization of the UI).\n\nHere's a toy example:\n\n```python\nimport random\nimport gradio as gr\n\ndef random_response(message, history):\n return random.choice([\"Yes\", \"No\"])\n\ndemo = gr.ChatInterface(random_response)\n\ndemo.launch()\n\n```\n\n\nYou can [read more about `gr.ChatInterface` here](https://gradio.app/guides/creating-a-chatbot-fast).\n\n## Blocks: More Flexibility and Control\n\nGradio offers two approaches to build apps:\n\n1\\. **Interface** and **ChatInterface**, which provide a high-level abstraction for creating demos that we've been discussing so far.\n\n2\\. **Blocks**, a low-level API for designing web apps with more flexible layouts and data flows. Blocks allows you to do things like feature multiple data flows and demos, control where components appear on the page, handle complex data flows (e.g. outputs can serve as inputs to other functions), and update properties/visibility of components based on user interaction \u2014 still all in Python. If this customizability is what you need, try `Blocks` instead!\n\n## Hello, Blocks\n\nLet's take a look at a simple example. Note how the API here differs from `Interface`.\n\n```python\nimport gradio as gr\n\ndef greet(name):\n return \"Hello \" + name + \"!\"\n\nwith gr.Blocks() as demo:\n name = gr.Textbox(label=\"Name\")\n output = gr.Textbox(label=\"Output Box\")\n greet_btn = gr.Button(\"Greet\")\n greet_btn.click(fn=greet, inputs=name, outputs=output, api_name=\"greet\")\n \n\ndemo.launch()\n```\n\n\nThings to note:\n\n- `Blocks` are made with a `with` clause, and any component created inside this clause is automatically added to the app.\n- Components appear vertically in the app in the order they are created. (Later we will cover customizing layouts!)\n- A `Button` was created, and then a `click` event-listener was added to this button. The API for this should look familiar! Like an `Interface`, the `click` method takes a Python function, input components, and output components.\n\n## More Complexity\n\nHere's an app to give you a taste of what's possible with `Blocks`:\n\n```python\nimport numpy as np\nimport gradio as gr\n\n\ndef flip_text(x):\n return x[::-1]\n\n\ndef flip_image(x):\n return np.fliplr(x)\n\n\nwith gr.Blocks() as demo:\n gr.Markdown(\"Flip text or image files using this demo.\")\n with gr.Tab(\"Flip Text\"):\n text_input = gr.Textbox()\n text_output = gr.Textbox()\n text_button = gr.Button(\"Flip\")\n with gr.Tab(\"Flip Image\"):\n with gr.Row():\n image_input = gr.Image()\n image_output = gr.Image()\n image_button = gr.Button(\"Flip\")\n\n with gr.Accordion(\"Open for More!\"):\n gr.Markdown(\"Look at me...\")\n\n text_button.click(flip_text, inputs=text_input, outputs=text_output)\n image_button.click(flip_image, inputs=image_input, outputs=image_output)\n\ndemo.launch()\n\n```\n\n\nA lot more going on here! We'll cover how to create complex `Blocks` apps like this in the [building with blocks](https://gradio.app/building_with_blocks) section for you.\n\nCongrats, you're now familiar with the basics of Gradio! \ud83e\udd73 Go to our [next guide](https://gradio.app/key_features) to learn more about the key features of Gradio.\n", "html": "

Quickstart

\n\n

Prerequisite: Gradio requires Python 3.8 or higher, that's all!

\n\n

What Does Gradio Do?

\n\n

One of the best ways to share your machine learning model, API, or data science workflow with others is to create an interactive app that allows your users or colleagues to try out the demo in their browsers.

\n\n

Gradio allows you to build demos and share them, all in Python. And usually in just a few lines of code! So let's get started.

\n\n

Hello, World

\n\n

To get Gradio running with a simple \"Hello, World\" example, follow these three steps:

\n\n

1. Install Gradio using pip:

\n\n
pip install gradio\n
\n\n

2. Run the code below as a Python script or in a Jupyter Notebook (or Google Colab):

\n\n
import gradio as gr\n\ndef greet(name):\n    return \"Hello \" + name + \"!\"\n\ndemo = gr.Interface(fn=greet, inputs=\"text\", outputs=\"text\")\n\ndemo.launch()   \n
\n\n

We shorten the imported name to gr for better readability of code using Gradio. This is a widely adopted convention that you should follow so that anyone working with your code can easily understand it.

\n\n

3. The demo below will appear automatically within the Jupyter Notebook, or pop up in a browser at http://localhost:7860 if running from a script:

\n\n

\n\n

When developing locally, if you want to run the code as a Python script, you can use the Gradio CLI to launch the application in reload mode, which will provide seamless and fast development. Learn more about reloading in the Auto-Reloading Guide.

\n\n
gradio app.py\n
\n\n

Note: you can also do python app.py, but it won't provide the automatic reload mechanism.

\n\n

The Interface Class

\n\n

You'll notice that in order to make the demo, we created a gr.Interface. This Interface class can wrap any Python function with a user interface. In the example above, we saw a simple text-based function, but the function could be anything from a music generator to a tax calculator to the prediction function of a pretrained machine learning model.

\n\n

The core Interface class is initialized with three required parameters:

\n\n
    \n
  • fn: the function to wrap a UI around
  • \n
  • inputs: which component(s) to use for the input (e.g. \"text\", \"image\" or \"audio\")
  • \n
  • outputs: which component(s) to use for the output (e.g. \"text\", \"image\" or \"label\")
  • \n
\n\n

Let's take a closer look at these components used to provide input and output.

\n\n

Component Attributes

\n\n

We saw some simple Textbox components in the previous examples, but what if you want to change how the UI components look or behave?

\n\n

Let's say you want to customize the input text field \u2014 for example, you want it to be larger and have a text placeholder. If you use the actual class for Textbox instead of the string shortcut, you have access to much more customizability through component attributes.

\n\n
import gradio as gr\n\ndef greet(name):\n    return \"Hello \" + name + \"!\"\n\ndemo = gr.Interface(\n    fn=greet,\n    inputs=gr.Textbox(lines=2, placeholder=\"Name Here...\"),\n    outputs=\"text\",\n)\ndemo.launch()\n\n
\n\n

\n\n

Multiple Input and Output Components

\n\n

Suppose you had a more complex function, with multiple inputs and outputs. In the example below, we define a function that takes a string, boolean, and number, and returns a string and number. Take a look at how you pass a list of input and output components.

\n\n
import gradio as gr\n\ndef greet(name, is_morning, temperature):\n    salutation = \"Good morning\" if is_morning else \"Good evening\"\n    greeting = f\"{salutation} {name}. It is {temperature} degrees today\"\n    celsius = (temperature - 32) * 5 / 9\n    return greeting, round(celsius, 2)\n\ndemo = gr.Interface(\n    fn=greet,\n    inputs=[\"text\", \"checkbox\", gr.Slider(0, 100)],\n    outputs=[\"text\", \"number\"],\n)\ndemo.launch()\n\n
\n\n

\n\n

You simply wrap the components in a list. Each component in the inputs list corresponds to one of the parameters of the function, in order. Each component in the outputs list corresponds to one of the values returned by the function, again in order.

\n\n

An Image Example

\n\n

Gradio supports many types of components, such as Image, DataFrame, Video, or Label. Let's try an image-to-image function to get a feel for these!

\n\n
import numpy as np\nimport gradio as gr\n\ndef sepia(input_img):\n    sepia_filter = np.array([\n        [0.393, 0.769, 0.189], \n        [0.349, 0.686, 0.168], \n        [0.272, 0.534, 0.131]\n    ])\n    sepia_img = input_img.dot(sepia_filter.T)\n    sepia_img /= sepia_img.max()\n    return sepia_img\n\ndemo = gr.Interface(sepia, gr.Image(shape=(200, 200)), \"image\")\ndemo.launch()\n\n
\n\n

\n\n

When using the Image component as input, your function will receive a NumPy array with the shape (height, width, 3), where the last dimension represents the RGB values. We'll return an image as well in the form of a NumPy array.

\n\n

You can also set the datatype used by the component with the type= keyword argument. For example, if you wanted your function to take a file path to an image instead of a NumPy array, the input Image component could be written as:

\n\n
gr.Image(type=\"filepath\", shape=...)\n
\n\n

Also note that our input Image component comes with an edit button \ud83d\udd89, which allows for cropping and zooming into images. Manipulating images in this way can help reveal biases or hidden flaws in a machine learning model!

\n\n

You can read more about the many components and how to use them in the Gradio docs.

\n\n

Chatbots

\n\n

Gradio includes a high-level class, gr.ChatInterface, which is similar to gr.Interface, but is specifically designed for chatbot UIs. The gr.ChatInterface class also wraps a function, but this function must have a specific signature. The function should take two arguments: message and then history (the arguments can be named anything, but must be in this order).

\n\n
    \n
  • message: a str representing the user's input
  • \n
  • history: a list of lists representing the conversations up until that point. Each inner list consists of two str representing a pair: [user input, bot response].
  • \n
\n\n

Your function should return a single string response, which is the bot's response to the particular user input message.

\n\n

Other than that, gr.ChatInterface has no required parameters (though several are available for customization of the UI).

\n\n

Here's a toy example:

\n\n
import random\nimport gradio as gr\n\ndef random_response(message, history):\n    return random.choice([\"Yes\", \"No\"])\n\ndemo = gr.ChatInterface(random_response)\n\ndemo.launch()\n\n
\n\n

\n\n
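To make the history format concrete, here is a minimal, hypothetical sketch (the count_turns name is ours, not part of the guide): the bot echoes the message and reports how many [user input, bot response] pairs are already stored in history.
\n\n
import gradio as gr\n\ndef count_turns(message, history):\n    # history is a list of [user_message, bot_response] pairs from earlier turns\n    return f'You said: {message}. There are {len(history)} earlier turns in this chat.'\n\ndemo = gr.ChatInterface(count_turns)\n\ndemo.launch()\n
\n\n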

You can read more about gr.ChatInterface here.

\n\n

Blocks: More Flexibility and Control

\n\n

Gradio offers two approaches to build apps:

\n\n

1. Interface and ChatInterface, which provide a high-level abstraction for creating demos that we've been discussing so far.

\n\n

2. Blocks, a low-level API for designing web apps with more flexible layouts and data flows. Blocks allows you to do things like feature multiple data flows and demos, control where components appear on the page, handle complex data flows (e.g. outputs can serve as inputs to other functions), and update properties/visibility of components based on user interaction \u2014 still all in Python. If this customizability is what you need, try Blocks instead!

\n\n

Hello, Blocks

\n\n

Let's take a look at a simple example. Note how the API here differs from Interface.

\n\n
import gradio as gr\n\ndef greet(name):\n    return \"Hello \" + name + \"!\"\n\nwith gr.Blocks() as demo:\n    name = gr.Textbox(label=\"Name\")\n    output = gr.Textbox(label=\"Output Box\")\n    greet_btn = gr.Button(\"Greet\")\n    greet_btn.click(fn=greet, inputs=name, outputs=output, api_name=\"greet\")\n\n\ndemo.launch()\n
\n\n

\n\n

Things to note:

\n\n
    \n
  • Blocks are made with a with clause, and any component created inside this clause is automatically added to the app.
  • \n
  • Components appear vertically in the app in the order they are created. (Later we will cover customizing layouts!)
  • \n
  • A Button was created, and then a click event-listener was added to this button. The API for this should look familiar! Like an Interface, the click method takes a Python function, input components, and output components.
  • \n
\n\n

More Complexity

\n\n

Here's an app to give you a taste of what's possible with Blocks:

\n\n
import numpy as np\nimport gradio as gr\n\n\ndef flip_text(x):\n    return x[::-1]\n\n\ndef flip_image(x):\n    return np.fliplr(x)\n\n\nwith gr.Blocks() as demo:\n    gr.Markdown(\"Flip text or image files using this demo.\")\n    with gr.Tab(\"Flip Text\"):\n        text_input = gr.Textbox()\n        text_output = gr.Textbox()\n        text_button = gr.Button(\"Flip\")\n    with gr.Tab(\"Flip Image\"):\n        with gr.Row():\n            image_input = gr.Image()\n            image_output = gr.Image()\n        image_button = gr.Button(\"Flip\")\n\n    with gr.Accordion(\"Open for More!\"):\n        gr.Markdown(\"Look at me...\")\n\n    text_button.click(flip_text, inputs=text_input, outputs=text_output)\n    image_button.click(flip_image, inputs=image_input, outputs=image_output)\n\ndemo.launch()\n\n
\n\n

\n\n

There's a lot more going on here! We'll cover how to create complex Blocks apps like this in the building with blocks section.

\n\n

Congrats, you're now familiar with the basics of Gradio! \ud83e\udd73 Go to our next guide to learn more about the key features of Gradio.

\n", "tags": [], "spaces": [], "url": "/guides/quickstart/", "contributor": null}, {"name": "key-features", "category": "getting-started", "pretty_category": "Getting Started", "guide_index": 2, "absolute_index": 1, "pretty_name": "Key Features", "content": "# Key Features\n\nLet's go through some of the most popular features of Gradio! Here are Gradio's key features:\n\n1. [Adding example inputs](#example-inputs)\n2. [Passing custom error messages](#alerts)\n3. [Adding descriptive content](#descriptive-content)\n4. [Setting up flagging](#flagging)\n5. [Preprocessing and postprocessing](#preprocessing-and-postprocessing)\n6. [Styling demos](#styling)\n7. [Queuing users](#queuing)\n8. [Iterative outputs](#iterative-outputs)\n9. [Progress bars](#progress-bars)\n10. [Batch functions](#batch-functions)\n11. [Running on collaborative notebooks](#colab-notebooks)\n\n## Example Inputs\n\nYou can provide example data that a user can easily load into `Interface`. This can be helpful to demonstrate the types of inputs the model expects, as well as to provide a way to explore your dataset in conjunction with your model. To load example data, you can provide a **nested list** to the `examples=` keyword argument of the Interface constructor. Each sublist within the outer list represents a data sample, and each element within the sublist represents an input for each input component. The format of example data for each component is specified in the [Docs](https://gradio.app/docs#components).\n\n```python\nimport gradio as gr\n\ndef calculator(num1, operation, num2):\n if operation == \"add\":\n return num1 + num2\n elif operation == \"subtract\":\n return num1 - num2\n elif operation == \"multiply\":\n return num1 * num2\n elif operation == \"divide\":\n if num2 == 0:\n raise gr.Error(\"Cannot divide by zero!\")\n return num1 / num2\n\ndemo = gr.Interface(\n calculator,\n [\n \"number\", \n gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n \"number\"\n ],\n \"number\",\n examples=[\n [5, \"add\", 3],\n [4, \"divide\", 2],\n [-4, \"multiply\", 2.5],\n [0, \"subtract\", 1.2],\n ],\n title=\"Toy Calculator\",\n description=\"Here's a sample toy calculator. Allows you to calculate things like $2+2=4$\",\n)\ndemo.launch()\n\n```\n\n\nYou can load a large dataset into the examples to browse and interact with the dataset through Gradio. The examples will be automatically paginated (you can configure this through the `examples_per_page` argument of `Interface`).\n\nContinue learning about examples in the [More On Examples](https://gradio.app/more-on-examples) guide.\n\n## Alerts\n\nYou wish to pass custom error messages to the user. To do so, raise a `gr.Error(\"custom message\")` to display an error message. If you try to divide by zero in the calculator demo above, a popup modal will display the custom error message. Learn more about Error in the [docs](https://gradio.app/docs#error). \n\nYou can also issue `gr.Warning(\"message\")` and `gr.Info(\"message\")` by having them as standalone lines in your function, which will immediately display modals while continuing the execution of your function. Queueing needs to be enabled for this to work. 
\n\nNote below how the `gr.Error` has to be raised, while the `gr.Warning` and `gr.Info` are single lines.\n\n```python\ndef start_process(name):\n gr.Info(\"Starting process\")\n if name is None:\n gr.Warning(\"Name is empty\")\n ...\n if success == False:\n raise gr.Error(\"Process failed\")\n```\n \n## Descriptive Content\n\nIn the previous example, you may have noticed the `title=` and `description=` keyword arguments in the `Interface` constructor that helps users understand your app.\n\nThere are three arguments in the `Interface` constructor to specify where this content should go:\n\n* `title`: which accepts text and can display it at the very top of interface, and also becomes the page title.\n* `description`: which accepts text, markdown or HTML and places it right under the title.\n* `article`: which also accepts text, markdown or HTML and places it below the interface.\n\n![annotated](https://github.com/gradio-app/gradio/blob/main/guides/assets/annotated.png?raw=true)\n\nIf you're using the `Blocks` API instead, you can insert text, markdown, or HTML anywhere using the `gr.Markdown(...)` or `gr.HTML(...)` components, with descriptive content inside the `Component` constructor.\n\nAnother useful keyword argument is `label=`, which is present in every `Component`. This modifies the label text at the top of each `Component`. You can also add the `info=` keyword argument to form elements like `Textbox` or `Radio` to provide further information on their usage.\n\n```python\ngr.Number(label='Age', info='In years, must be greater than 0')\n```\n\n## Flagging\n\nBy default, an `Interface` will have \"Flag\" button. When a user testing your `Interface` sees input with interesting output, such as erroneous or unexpected model behaviour, they can flag the input for you to review. Within the directory provided by the `flagging_dir=` argument to the `Interface` constructor, a CSV file will log the flagged inputs. If the interface involves file data, such as for Image and Audio components, folders will be created to store those flagged data as well.\n\nFor example, with the calculator interface shown above, we would have the flagged data stored in the flagged directory shown below:\n\n```directory\n+-- calculator.py\n+-- flagged/\n| +-- logs.csv\n```\n\n*flagged/logs.csv*\n\n```csv\nnum1,operation,num2,Output\n5,add,7,12\n6,subtract,1.5,4.5\n```\n\nWith the sepia interface shown earlier, we would have the flagged data stored in the flagged directory shown below:\n\n```directory\n+-- sepia.py\n+-- flagged/\n| +-- logs.csv\n| +-- im/\n| | +-- 0.png\n| | +-- 1.png\n| +-- Output/\n| | +-- 0.png\n| | +-- 1.png\n```\n\n*flagged/logs.csv*\n\n```csv\nim,Output\nim/0.png,Output/0.png\nim/1.png,Output/1.png\n```\n\nIf you wish for the user to provide a reason for flagging, you can pass a list of strings to the `flagging_options` argument of Interface. Users will have to select one of the strings when flagging, which will be saved as an additional column to the CSV.\n\n## Preprocessing and Postprocessing\n\n![](https://github.com/gradio-app/gradio/blob/main/js/_website/src/assets/img/dataflow.svg?raw=true)\n\nAs you've seen, Gradio includes components that can handle a variety of different data types, such as images, audio, and video. 
Most components can be used both as inputs or outputs.\n\nWhen a component is used as an input, Gradio automatically handles the *preprocessing* needed to convert the data from a type sent by the user's browser (such as a base64 representation of a webcam snapshot) to a form that can be accepted by your function (such as a `numpy` array).\n\nSimilarly, when a component is used as an output, Gradio automatically handles the *postprocessing* needed to convert the data from what is returned by your function (such as a list of image paths) to a form that can be displayed in the user's browser (such as a `Gallery` of images in base64 format).\n\nYou can control the *preprocessing* using the parameters when constructing the image component. For example, here if you instantiate the `Image` component with the following parameters, it will convert the image to the `PIL` type and reshape it to be `(100, 100)` no matter the original size that it was submitted as:\n\n```py\nimg = gr.Image(shape=(100, 100), type=\"pil\")\n```\n\nIn contrast, here we keep the original size of the image, but invert the colors before converting it to a numpy array:\n\n```py\nimg = gr.Image(invert_colors=True, type=\"numpy\")\n```\n\nPostprocessing is a lot easier! Gradio automatically recognizes the format of the returned data (e.g. is the `Image` a `numpy` array or a `str` filepath?) and postprocesses it into a format that can be displayed by the browser.\n\nTake a look at the [Docs](https://gradio.app/docs) to see all the preprocessing-related parameters for each Component.\n\n## Styling\n\nGradio themes are the easiest way to customize the look and feel of your app. You can choose from a variety of themes, or create your own. To do so, pass the `theme=` kwarg to the `Interface` constructor. For example:\n\n```python\ndemo = gr.Interface(..., theme=gr.themes.Monochrome())\n```\n\nGradio comes with a set of prebuilt themes which you can load from `gr.themes.*`. You can extend these themes or create your own themes from scratch - see the [Theming guide](https://gradio.app/guides/theming-guide) for more details.\n\nFor additional styling ability, you can pass any CSS to your app using the `css=` kwarg.\nThe base class for the Gradio app is `gradio-container`, so here's an example that changes the background color of the Gradio app:\n\n```python\nwith gr.Interface(css=\".gradio-container {background-color: red}\") as demo:\n ...\n```\n\nSome components can be additionally styled through the `style()` method. For example:\n\n```python\nimg = gr.Image(\"lion.jpg\").style(height='24', rounded=False)\n```\n\nTake a look at the [Docs](https://gradio.app/docs) to see all the styling options for each Component.\n\n## Queuing\n\nIf your app expects heavy traffic, use the `queue()` method to control processing rate. This will queue up calls so only a certain number of requests are processed at a single time. 
Queueing uses websockets, which also prevent network timeouts, so you should use queueing if the inference time of your function is long (> 1min).\n\nWith `Interface`:\n\n```python\ndemo = gr.Interface(...).queue()\ndemo.launch()\n```\n\nWith `Blocks`:\n\n```python\nwith gr.Blocks() as demo:\n #...\ndemo.queue()\ndemo.launch()\n```\n\nYou can control the number of requests processed at a single time as such:\n\n```python\ndemo.queue(concurrency_count=3)\n```\n\nSee the [Docs on queueing](/docs/#queue) on configuring other queuing parameters.\n\nTo specify only certain functions for queueing in Blocks:\n\n```python\nwith gr.Blocks() as demo2:\n num1 = gr.Number()\n num2 = gr.Number()\n output = gr.Number()\n gr.Button(\"Add\").click(\n lambda a, b: a + b, [num1, num2], output)\n gr.Button(\"Multiply\").click(\n lambda a, b: a * b, [num1, num2], output, queue=True)\ndemo2.launch()\n```\n\n## Iterative Outputs\n\nIn some cases, you may want to stream a sequence of outputs rather than show a single output at once. For example, you might have an image generation model and you want to show the image that is generated at each step, leading up to the final image. Or you might have a chatbot which streams its response one word at a time instead of returning it all at once.\n\nIn such cases, you can supply a **generator** function into Gradio instead of a regular function. Creating generators in Python is very simple: instead of a single `return` value, a function should `yield` a series of values instead. Usually the `yield` statement is put in some kind of loop. Here's an example of an generator that simply counts up to a given number:\n\n```python\ndef my_generator(x):\n for i in range(x):\n yield i\n```\n\nYou supply a generator into Gradio the same way as you would a regular function. For example, here's a a (fake) image generation model that generates noise for several steps before outputting an image:\n\n```python\nimport gradio as gr\nimport numpy as np\nimport time\n\n# define core fn, which returns a generator {steps} times before returning the image\ndef fake_diffusion(steps):\n for _ in range(steps):\n time.sleep(1)\n image = np.random.random((600, 600, 3))\n yield image\n image = \"https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg\"\n yield image\n\n\ndemo = gr.Interface(fake_diffusion, inputs=gr.Slider(1, 10, 3), outputs=\"image\")\n\n# define queue - required for generators\ndemo.queue()\n\ndemo.launch()\n\n```\n\n\nNote that we've added a `time.sleep(1)` in the iterator to create an artificial pause between steps so that you are able to observe the steps of the iterator (in a real image generation model, this probably wouldn't be necessary).\n\nSupplying a generator into Gradio **requires** you to enable queuing in the underlying Interface or Blocks (see the queuing section above).\n\n## Progress Bars\n\nGradio supports the ability to create a custom Progress Bars so that you have customizability and control over the progress update that you show to the user. In order to enable this, simply add an argument to your method that has a default value of a `gr.Progress` instance. Then you can update the progress levels by calling this instance directly with a float between 0 and 1, or using the `tqdm()` method of the `Progress` instance to track progress over an iterable, as shown below. 
Queueing must be enabled for progress updates.\n\n```python\nimport gradio as gr\nimport time\n\ndef slowly_reverse(word, progress=gr.Progress()):\n progress(0, desc=\"Starting\")\n time.sleep(1)\n progress(0.05)\n new_string = \"\"\n for letter in progress.tqdm(word, desc=\"Reversing\"):\n time.sleep(0.25)\n new_string = letter + new_string\n return new_string\n\ndemo = gr.Interface(slowly_reverse, gr.Text(), gr.Text())\n\nif __name__ == \"__main__\":\n demo.queue(concurrency_count=10).launch()\n\n```\n\n\nIf you use the `tqdm` library, you can even report progress updates automatically from any `tqdm.tqdm` that already exists within your function by setting the default argument as `gr.Progress(track_tqdm=True)`!\n\n## Batch Functions\n\nGradio supports the ability to pass *batch* functions. Batch functions are just\nfunctions which take in a list of inputs and return a list of predictions.\n\nFor example, here is a batched function that takes in two lists of inputs (a list of\nwords and a list of ints), and returns a list of trimmed words as output:\n\n```py\nimport time\n\ndef trim_words(words, lens):\n trimmed_words = []\n time.sleep(5)\n for w, l in zip(words, lens):\n trimmed_words.append(w[:int(l)]) \n return [trimmed_words]\n```\n\nThe advantage of using batched functions is that if you enable queuing, the Gradio\nserver can automatically *batch* incoming requests and process them in parallel,\npotentially speeding up your demo. Here's what the Gradio code looks like (notice\nthe `batch=True` and `max_batch_size=16` -- both of these parameters can be passed\ninto event triggers or into the `Interface` class)\n\nWith `Interface`:\n\n```python\ndemo = gr.Interface(trim_words, [\"textbox\", \"number\"], [\"output\"], \n batch=True, max_batch_size=16)\ndemo.queue()\ndemo.launch()\n```\n\nWith `Blocks`:\n\n```py\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n with gr.Row():\n word = gr.Textbox(label=\"word\")\n leng = gr.Number(label=\"leng\")\n output = gr.Textbox(label=\"Output\")\n with gr.Row():\n run = gr.Button()\n\n event = run.click(trim_words, [word, leng], output, batch=True, max_batch_size=16)\n\ndemo.queue()\ndemo.launch()\n```\n\nIn the example above, 16 requests could be processed in parallel (for a total inference\ntime of 5 seconds), instead of each request being processed separately (for a total\ninference time of 80 seconds). Many Hugging Face `transformers` and `diffusers` models\nwork very naturally with Gradio's batch mode: here's [an example demo using diffusers to\ngenerate images in batches](https://github.com/gradio-app/gradio/blob/main/demo/diffusers_with_batching/run.py)\n\nNote: using batch functions with Gradio **requires** you to enable queuing in the underlying Interface or Blocks (see the queuing section above).\n\n\n## Colab Notebooks\n\n\nGradio is able to run anywhere you run Python, including local jupyter notebooks as well as collaborative notebooks, such as [Google Colab](https://colab.research.google.com/). In the case of local jupyter notebooks and Google Colab notbooks, Gradio runs on a local server which you can interact with in your browser. (Note: for Google Colab, this is accomplished by [service worker tunneling](https://github.com/tensorflow/tensorboard/blob/master/docs/design/colab_integration.md), which requires cookies to be enabled in your browser.) 
For other remote notebooks, Gradio will also run on a server, but you will need to use [SSH tunneling](https://coderwall.com/p/ohk6cg/remote-access-to-ipython-notebooks-via-ssh) to view the app in your local browser. Often a simpler options is to use Gradio's built-in public links, [discussed in the next Guide](https://gradio.app/guides/sharing-your-app/#sharing-demos). ", "html": "

Key Features

\n\n

Let's go through some of the most popular features of Gradio! Here are Gradio's key features:

\n\n
    \n
  1. Adding example inputs
  2. \n
  3. Passing custom error messages
  4. \n
  5. Adding descriptive content
  6. \n
  7. Setting up flagging
  8. \n
  9. Preprocessing and postprocessing
  10. \n
  11. Styling demos
  12. \n
  13. Queuing users
  14. \n
  15. Iterative outputs
  16. \n
  17. Progress bars
  18. \n
  19. Batch functions
  20. \n
  21. Running on collaborative notebooks
  22. \n
\n\n

Example Inputs

\n\n

You can provide example data that a user can easily load into Interface. This can be helpful to demonstrate the types of inputs the model expects, as well as to provide a way to explore your dataset in conjunction with your model. To load example data, you can provide a nested list to the examples= keyword argument of the Interface constructor. Each sublist within the outer list represents a data sample, and each element within the sublist represents an input for each input component. The format of example data for each component is specified in the Docs.

\n\n
import gradio as gr\n\ndef calculator(num1, operation, num2):\n    if operation == \"add\":\n        return num1 + num2\n    elif operation == \"subtract\":\n        return num1 - num2\n    elif operation == \"multiply\":\n        return num1 * num2\n    elif operation == \"divide\":\n        if num2 == 0:\n            raise gr.Error(\"Cannot divide by zero!\")\n        return num1 / num2\n\ndemo = gr.Interface(\n    calculator,\n    [\n        \"number\", \n        gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n        \"number\"\n    ],\n    \"number\",\n    examples=[\n        [5, \"add\", 3],\n        [4, \"divide\", 2],\n        [-4, \"multiply\", 2.5],\n        [0, \"subtract\", 1.2],\n    ],\n    title=\"Toy Calculator\",\n    description=\"Here's a sample toy calculator. Allows you to calculate things like $2+2=4$\",\n)\ndemo.launch()\n\n
\n\n

\n\n

You can load a large dataset into the examples to browse and interact with the dataset through Gradio. The examples will be automatically paginated (you can configure this through the examples_per_page argument of Interface).

\n\n
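As a minimal sketch (reusing the calculator function and examples from the demo above; the page size here is just an illustrative value), pagination is configured through that argument:
\n\n
demo = gr.Interface(\n    calculator,\n    [\n        'number',\n        gr.Radio(['add', 'subtract', 'multiply', 'divide']),\n        'number'\n    ],\n    'number',\n    examples=[\n        [5, 'add', 3],\n        [4, 'divide', 2],\n        [-4, 'multiply', 2.5],\n        [0, 'subtract', 1.2],\n    ],\n    examples_per_page=2,  # illustrative value: show two examples per page\n)\ndemo.launch()\n
\n\n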

Continue learning about examples in the More On Examples guide.

\n\n

Alerts

\n\n

You may wish to pass custom error messages to the user. To do so, raise a gr.Error(\"custom message\") to display an error message. If you try to divide by zero in the calculator demo above, a popup modal will display the custom error message. Learn more about Error in the docs.

\n\n

You can also issue gr.Warning(\"message\") and gr.Info(\"message\") by having them as standalone lines in your function, which will immediately display modals while continuing the execution of your function. Queueing needs to be enabled for this to work.

\n\n

Note below how the gr.Error has to be raised, while the gr.Warning and gr.Info are single lines.

\n\n
def start_process(name):\n    gr.Info(\"Starting process\")\n    if name is None:\n        gr.Warning(\"Name is empty\")\n    ...\n    if success == False:\n        raise gr.Error(\"Process failed\")\n
\n\n

Descriptive Content

\n\n

In the previous example, you may have noticed the title= and description= keyword arguments in the Interface constructor that help users understand your app.

\n\n

There are three arguments in the Interface constructor to specify where this content should go:

\n\n
    \n
  • title: which accepts text and can display it at the very top of the interface, and also becomes the page title.
  • \n
  • description: which accepts text, markdown or HTML and places it right under the title.
  • \n
  • article: which also accepts text, markdown or HTML and places it below the interface.
  • \n
\n\n

\"annotated\"

\n\n
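As a minimal sketch (using a greet function like the one from the Quickstart; the title, description, and article strings are just placeholders), all three can be passed to the Interface constructor:
\n\n
import gradio as gr\n\ndef greet(name):\n    return 'Hello ' + name + '!'\n\ndemo = gr.Interface(\n    fn=greet,\n    inputs='text',\n    outputs='text',\n    title='Greeter',  # shown at the very top, also becomes the page title\n    description='Type a name and get a greeting back.',  # rendered under the title\n    article='Built with Gradio. See the docs for more options.',  # rendered below the interface\n)\ndemo.launch()\n
\n\n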

If you're using the Blocks API instead, you can insert text, markdown, or HTML anywhere using the gr.Markdown(...) or gr.HTML(...) components, with descriptive content inside the Component constructor.

\n\n

Another useful keyword argument is label=, which is present in every Component. This modifies the label text at the top of each Component. You can also add the info= keyword argument to form elements like Textbox or Radio to provide further information on their usage.

\n\n
gr.Number(label='Age', info='In years, must be greater than 0')\n
\n\n

Flagging

\n\n

By default, an Interface will have a \"Flag\" button. When a user testing your Interface sees input with interesting output, such as erroneous or unexpected model behaviour, they can flag the input for you to review. Within the directory provided by the flagging_dir= argument to the Interface constructor, a CSV file will log the flagged inputs. If the interface involves file data, such as for Image and Audio components, folders will be created to store that flagged data as well.

\n\n

For example, with the calculator interface shown above, we would have the flagged data stored in the flagged directory shown below:

\n\n
+-- calculator.py\n+-- flagged/\n|   +-- logs.csv\n
\n\n

flagged/logs.csv

\n\n
num1,operation,num2,Output\n5,add,7,12\n6,subtract,1.5,4.5\n
\n\n

With the sepia interface shown earlier, we would have the flagged data stored in the flagged directory shown below:

\n\n
+-- sepia.py\n+-- flagged/\n|   +-- logs.csv\n|   +-- im/\n|   |   +-- 0.png\n|   |   +-- 1.png\n|   +-- Output/\n|   |   +-- 0.png\n|   |   +-- 1.png\n
\n\n

flagged/logs.csv

\n\n
im,Output\nim/0.png,Output/0.png\nim/1.png,Output/1.png\n
\n\n

If you wish for the user to provide a reason for flagging, you can pass a list of strings to the flagging_options argument of Interface. Users will have to select one of the strings when flagging, which will be saved as an additional column to the CSV.

\n\n
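A minimal sketch (reusing the calculator function from the example above; the option strings are ours): whichever reason the user selects is stored as an extra column in flagged/logs.csv.
\n\n
demo = gr.Interface(\n    calculator,\n    ['number', gr.Radio(['add', 'subtract', 'multiply', 'divide']), 'number'],\n    'number',\n    flagging_options=['incorrect result', 'offensive', 'other'],  # reasons users can pick when flagging\n)\ndemo.launch()\n
\n\n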

Preprocessing and Postprocessing

\n\n

\"\"

\n\n

As you've seen, Gradio includes components that can handle a variety of different data types, such as images, audio, and video. Most components can be used as both inputs and outputs.

\n\n

When a component is used as an input, Gradio automatically handles the preprocessing needed to convert the data from a type sent by the user's browser (such as a base64 representation of a webcam snapshot) to a form that can be accepted by your function (such as a numpy array).

\n\n

Similarly, when a component is used as an output, Gradio automatically handles the postprocessing needed to convert the data from what is returned by your function (such as a list of image paths) to a form that can be displayed in the user's browser (such as a Gallery of images in base64 format).

\n\n

You can control the preprocessing using the parameters when constructing the image component. For example, if you instantiate the Image component with the following parameters, it will convert the image to the PIL type and reshape it to (100, 100) no matter the original size it was submitted at:

\n\n
img = gr.Image(shape=(100, 100), type=\"pil\")\n
\n\n

In contrast, here we keep the original size of the image, but invert the colors before converting it to a numpy array:

\n\n
img = gr.Image(invert_colors=True, type=\"numpy\")\n
\n\n

Postprocessing is a lot easier! Gradio automatically recognizes the format of the returned data (e.g. is the Image a numpy array or a str filepath?) and postprocesses it into a format that can be displayed by the browser.

\n\n
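For instance, in this minimal sketch (the function names and the filename are ours), either return value is a valid Image output: a numpy array or a str filepath, and Gradio postprocesses both for the browser.
\n\n
import numpy as np\nimport gradio as gr\n\ndef flip(img):\n    return np.fliplr(img)  # returning a numpy array works\n\ndef stored_image(img):\n    return 'lion.jpg'      # returning a str filepath to an existing image also works (hypothetical file)\n\ndemo = gr.Interface(flip, gr.Image(), gr.Image())\ndemo.launch()\n
\n\n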

Take a look at the Docs to see all the preprocessing-related parameters for each Component.

\n\n

Styling

\n\n

Gradio themes are the easiest way to customize the look and feel of your app. You can choose from a variety of themes, or create your own. To do so, pass the theme= kwarg to the Interface constructor. For example:

\n\n
demo = gr.Interface(..., theme=gr.themes.Monochrome())\n
\n\n

Gradio comes with a set of prebuilt themes which you can load from gr.themes.*. You can extend these themes or create your own themes from scratch - see the Theming guide for more details.

\n\n

For additional styling ability, you can pass any CSS to your app using the css= kwarg.\nThe base class for the Gradio app is gradio-container, so here's an example that changes the background color of the Gradio app:

\n\n
with gr.Blocks(css=\".gradio-container {background-color: red}\") as demo:\n    ...\n
\n\n

Some components can be additionally styled through the style() method. For example:

\n\n
img = gr.Image(\"lion.jpg\").style(height='24', rounded=False)\n
\n\n

Take a look at the Docs to see all the styling options for each Component.

\n\n

Queuing

\n\n

If your app expects heavy traffic, use the queue() method to control processing rate. This will queue up calls so only a certain number of requests are processed at a single time. Queueing uses websockets, which also prevent network timeouts, so you should use queueing if the inference time of your function is long (> 1min).

\n\n

With Interface:

\n\n
demo = gr.Interface(...).queue()\ndemo.launch()\n
\n\n

With Blocks:

\n\n
with gr.Blocks() as demo:\n    #...\ndemo.queue()\ndemo.launch()\n
\n\n

You can control the number of requests processed at a single time as follows:

\n\n
demo.queue(concurrency_count=3)\n
\n\n

See the Docs on queueing for configuring other queuing parameters.

\n\n
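For instance, one such parameter is max_size, which caps how many requests can wait in the queue at once (a minimal sketch; the numbers are just illustrative):
\n\n
demo.queue(concurrency_count=3, max_size=20)\n
\n\n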

To specify only certain functions for queueing in Blocks:

\n\n
with gr.Blocks() as demo2:\n    num1 = gr.Number()\n    num2 = gr.Number()\n    output = gr.Number()\n    gr.Button(\"Add\").click(\n        lambda a, b: a + b, [num1, num2], output)\n    gr.Button(\"Multiply\").click(\n        lambda a, b: a * b, [num1, num2], output, queue=True)\ndemo2.launch()\n
\n\n

Iterative Outputs

\n\n

In some cases, you may want to stream a sequence of outputs rather than show a single output at once. For example, you might have an image generation model and you want to show the image that is generated at each step, leading up to the final image. Or you might have a chatbot which streams its response one word at a time instead of returning it all at once.

\n\n

In such cases, you can supply a generator function into Gradio instead of a regular function. Creating generators in Python is very simple: instead of a single return value, a function should yield a series of values. Usually the yield statement is put in some kind of loop. Here's an example of a generator that simply counts up to a given number:

\n\n
def my_generator(x):\n    for i in range(x):\n        yield i\n
\n\n

You supply a generator into Gradio the same way as you would a regular function. For example, here's a (fake) image generation model that generates noise for several steps before outputting an image:

\n\n
import gradio as gr\nimport numpy as np\nimport time\n\n# define core fn, which returns a generator {steps} times before returning the image\ndef fake_diffusion(steps):\n    for _ in range(steps):\n        time.sleep(1)\n        image = np.random.random((600, 600, 3))\n        yield image\n    image = \"https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg\"\n    yield image\n\n\ndemo = gr.Interface(fake_diffusion, inputs=gr.Slider(1, 10, 3), outputs=\"image\")\n\n# define queue - required for generators\ndemo.queue()\n\ndemo.launch()\n\n
\n\n

\n\n

Note that we've added a time.sleep(1) in the iterator to create an artificial pause between steps so that you are able to observe the steps of the iterator (in a real image generation model, this probably wouldn't be necessary).

\n\n

Supplying a generator into Gradio requires you to enable queuing in the underlying Interface or Blocks (see the queuing section above).

\n\n

Progress Bars

\n\n

Gradio supports the ability to create custom progress bars so that you have customizability and control over the progress updates that you show to the user. In order to enable this, simply add an argument to your method that has a default value of a gr.Progress instance. Then you can update the progress levels by calling this instance directly with a float between 0 and 1, or using the tqdm() method of the Progress instance to track progress over an iterable, as shown below. Queueing must be enabled for progress updates.

\n\n
import gradio as gr\nimport time\n\ndef slowly_reverse(word, progress=gr.Progress()):\n    progress(0, desc=\"Starting\")\n    time.sleep(1)\n    progress(0.05)\n    new_string = \"\"\n    for letter in progress.tqdm(word, desc=\"Reversing\"):\n        time.sleep(0.25)\n        new_string = letter + new_string\n    return new_string\n\ndemo = gr.Interface(slowly_reverse, gr.Text(), gr.Text())\n\nif __name__ == \"__main__\":\n    demo.queue(concurrency_count=10).launch()\n\n
\n\n

\n\n

If you use the tqdm library, you can even report progress updates automatically from any tqdm.tqdm that already exists within your function by setting the default argument as gr.Progress(track_tqdm=True)!

\n\n
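A minimal sketch of this (the function and variable names are ours; it assumes the tqdm library is installed): the tqdm loop inside the function is reported to the Gradio progress bar automatically.
\n\n
import time\nimport gradio as gr\nfrom tqdm import tqdm\n\ndef slowly_upper(word, progress=gr.Progress(track_tqdm=True)):\n    result = ''\n    for letter in tqdm(word, desc='Upper-casing'):  # tracked automatically via track_tqdm=True\n        time.sleep(0.25)\n        result += letter.upper()\n    return result\n\ndemo = gr.Interface(slowly_upper, gr.Text(), gr.Text())\ndemo.queue()\ndemo.launch()\n
\n\n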

Batch Functions

\n\n

Gradio supports the ability to pass batch functions. Batch functions are just\nfunctions which take in a list of inputs and return a list of predictions.

\n\n

For example, here is a batched function that takes in two lists of inputs (a list of\nwords and a list of ints), and returns a list of trimmed words as output:

\n\n
import time\n\ndef trim_words(words, lens):\n    trimmed_words = []\n    time.sleep(5)\n    for w, l in zip(words, lens):\n        trimmed_words.append(w[:int(l)])        \n    return [trimmed_words]\n
\n\n

The advantage of using batched functions is that if you enable queuing, the Gradio\nserver can automatically batch incoming requests and process them in parallel,\npotentially speeding up your demo. Here's what the Gradio code looks like (notice\nthe batch=True and max_batch_size=16 -- both of these parameters can be passed\ninto event triggers or into the Interface class)

\n\n

With Interface:

\n\n
demo = gr.Interface(trim_words, [\"textbox\", \"number\"], [\"output\"], \n                    batch=True, max_batch_size=16)\ndemo.queue()\ndemo.launch()\n
\n\n

With Blocks:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        word = gr.Textbox(label=\"word\")\n        leng = gr.Number(label=\"leng\")\n        output = gr.Textbox(label=\"Output\")\n    with gr.Row():\n        run = gr.Button()\n\n    event = run.click(trim_words, [word, leng], output, batch=True, max_batch_size=16)\n\ndemo.queue()\ndemo.launch()\n
\n\n

In the example above, 16 requests could be processed in parallel (for a total inference\ntime of 5 seconds), instead of each request being processed separately (for a total\ninference time of 80 seconds). Many Hugging Face transformers and diffusers models\nwork very naturally with Gradio's batch mode: here's an example demo using diffusers to\ngenerate images in batches

\n\n

Note: using batch functions with Gradio requires you to enable queuing in the underlying Interface or Blocks (see the queuing section above).

\n\n

Colab Notebooks

\n\n

Gradio is able to run anywhere you run Python, including local Jupyter notebooks as well as collaborative notebooks, such as Google Colab. In the case of local Jupyter notebooks and Google Colab notebooks, Gradio runs on a local server which you can interact with in your browser. (Note: for Google Colab, this is accomplished by service worker tunneling, which requires cookies to be enabled in your browser.) For other remote notebooks, Gradio will also run on a server, but you will need to use SSH tunneling to view the app in your local browser. Often a simpler option is to use Gradio's built-in public links, discussed in the next Guide.

\n", "tags": [], "spaces": [], "url": "/guides/key-features/", "contributor": null}, {"name": "sharing-your-app", "category": "getting-started", "pretty_category": "Getting Started", "guide_index": 3, "absolute_index": 2, "pretty_name": "Sharing Your App", "content": "# Sharing Your App\n\nHow to share your Gradio app: \n\n1. [Sharing demos with the share parameter](#sharing-demos)\n2. [Hosting on HF Spaces](#hosting-on-hf-spaces)\n3. [Embedding hosted spaces](#embedding-hosted-spaces)\n4. [Embedding with web components](#embedding-with-web-components)\n5. [Using the API page](#api-page)\n6. [Adding authentication to the page](#authentication)\n7. [Accessing Network Requests](#accessing-the-network-request-directly)\n8. [Mounting within FastAPI](#mounting-within-another-fast-api-app)\n9. [Security](#security-and-file-access)\n\n## Sharing Demos\n\nGradio demos can be easily shared publicly by setting `share=True` in the `launch()` method. Like this:\n\n```python\ndemo.launch(share=True)\n```\n\nThis generates a public, shareable link that you can send to anybody! When you send this link, the user on the other side can try out the model in their browser. Because the processing happens on your device (as long as your device stays on!), you don't have to worry about any packaging any dependencies. A share link usually looks something like this: **XXXXX.gradio.app**. Although the link is served through a Gradio URL, we are only a proxy for your local server, and do not store any data sent through your app.\n\nKeep in mind, however, that these links are publicly accessible, meaning that anyone can use your model for prediction! Therefore, make sure not to expose any sensitive information through the functions you write, or allow any critical changes to occur on your device. If you set `share=False` (the default, except in colab notebooks), only a local link is created, which can be shared by [port-forwarding](https://www.ssh.com/ssh/tunneling/example) with specific users. \n\n![sharing](https://github.com/gradio-app/gradio/blob/main/guides/assets/sharing.svg?raw=true)\n\nShare links expire after 72 hours.\n\n## Hosting on HF Spaces\n\nIf you'd like to have a permanent link to your Gradio demo on the internet, use Hugging Face Spaces. [Hugging Face Spaces](http://huggingface.co/spaces/) provides the infrastructure to permanently host your machine learning model for free! \n\nAfter you have [created a free Hugging Face account](https://huggingface.co/join), you have three methods to deploy your Gradio app to Hugging Face Spaces:\n\n1. From terminal: run `gradio deploy` in your app directory. The CLI will gather some basic metadata and then launch your app. To update your space, you can re-run this command or enable the Github Actions option to automatically update the Spaces on `git push`.\n\n2. From your browser: Drag and drop a folder containing your Gradio model and all related files [here](https://huggingface.co/new-space).\n\n3. Connect Spaces with your Git repository and Spaces will pull the Gradio app from there. See [this guide how to host on Hugging Face Spaces](https://huggingface.co/blog/gradio-spaces) for more information. \n\n\n\nNote: Some components, like `gr.Image`, will display a \"Share\" button only on Spaces, so that users can share the generated output to the Discussions page of the Space easily. You can disable this with `show_share_button`, such as `gr.Image(show_share_button=False)`. 
\n\n![Image with show_share_button=True](https://github.com/gradio-app/gradio/blob/main/guides/assets/share_icon.png?raw=true)\n\n## Embedding Hosted Spaces\n\nOnce you have hosted your app on Hugging Face Spaces (or on your own server), you may want to embed the demo on a different website, such as your blog or your portfolio. Embedding an interactive demo allows people to try out the machine learning model that you have built, without needing to download or install anything \u2014 right in their browser! The best part is that you can embed interactive demos even in static websites, such as GitHub pages.\n\nThere are two ways to embed your Gradio demos. You can find quick links to both options directly on the Hugging Face Space page, in the \"Embed this Space\" dropdown option:\n\n![Embed this Space dropdown option](https://github.com/gradio-app/gradio/blob/main/guides/assets/embed_this_space.png?raw=true)\n\n### Embedding with Web Components\n\nWeb components typically offer a better experience to users than IFrames. Web components load lazily, meaning that they won't slow down the loading time of your website, and they automatically adjust their height based on the size of the Gradio app. \n\nTo embed with Web Components:\n\n1. Import the gradio JS library into into your site by adding the script below in your site (replace {GRADIO_VERSION} in the URL with the library version of Gradio you are using). \n\n```html\n\n```\n\n2. Add \n```html\n\n```\n\nelement where you want to place the app. Set the `src=` attribute to your Space's embed URL, which you can find in the \"Embed this Space\" button. For example:\n\n\n```html\n\n```\n\n\n\nYou can see examples of how web components look on the Gradio landing page.\n\nYou can also customize the appearance and behavior of your web component with attributes that you pass into the `` tag:\n\n* `src`: as we've seen, the `src` attributes links to the URL of the hosted Gradio demo that you would like to embed\n* `space`: an optional shorthand if your Gradio demo is hosted on Hugging Face Space. Accepts a `username/space_name` instead of a full URL. Example: `gradio/Echocardiogram-Segmentation`. If this attribute attribute is provided, then `src` does not need to be provided.\n* `control_page_title`: a boolean designating whether the html title of the page should be set to the title of the Gradio app (by default `\"false\"`)\n* `initial_height`: the initial height of the web component while it is loading the Gradio app, (by default `\"300px\"`). Note that the final height is set based on the size of the Gradio app.\n* `container`: whether to show the border frame and information about where the Space is hosted (by default `\"true\"`)\n* `info`: whether to show just the information about where the Space is hosted underneath the embedded app (by default `\"true\"`)\n* `autoscroll`: whether to autoscroll to the output when prediction has finished (by default `\"false\"`)\n* `eager`: whether to load the Gradio app as soon as the page loads (by default `\"false\"`)\n* `theme_mode`: whether to use the `dark`, `light`, or default `system` theme mode (by default `\"system\"`)\n\nHere's an example of how to use these attributes to create a Gradio app that does not lazy load and has an initial height of 0px. \n\n```html\n\n```\n\n_Note: While Gradio's CSS will never impact the embedding page, the embedding page can affect the style of the embedded Gradio app. 
Make sure that any CSS in the parent page isn't so general that it could also apply to the embedded Gradio app and cause the styling to break. Element selectors such as `header { ... }` and `footer { ... }` will be the most likely to cause issues._\n\n### Embedding with IFrames\n\nTo embed with IFrames instead (if you cannot add javascript to your website, for example), add this element:\n\n```html\n\n```\n\nAgain, you can find the `src=` attribute to your Space's embed URL, which you can find in the \"Embed this Space\" button.\n\nNote: if you use IFrames, you'll probably want to add a fixed `height` attribute and set `style=\"border:0;\"` to remove the boreder. In addition, if your app requires permissions such as access to the webcam or the microphone, you'll need to provide that as well using the `allow` attribute.\n\n## API Page\n\nYou can use almost any Gradio app as an API! In the footer of a Gradio app [like this one](https://huggingface.co/spaces/gradio/hello_world), you'll see a \"Use via API\" link. \n\n![Use via API](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/gradio-guides/api3.gif)\n\nThis is a page that lists the endpoints that can be used to query the Gradio app, via our supported clients: either [the Python client](https://gradio.app/guides/getting-started-with-the-python-client/), or [the JavaScript client](https://gradio.app/guides/getting-started-with-the-js-client/). For each endpoint, Gradio automatically generates the parameters and their types, as well as example inputs.\n\nThe endpoints are automatically created when you launch a Gradio `Interface`. If you are using Gradio `Blocks`, you can also set up a Gradio API page, though we recommend that you explicitly name each event listener, such as\n\n```python\nbtn.click(add, [num1, num2], output, api_name=\"addition\")\n```\n\nThis will add and document the endpoint `/api/addition/` to the automatically generated API page. Otherwise, your API endpoints will appear as \"unnamed\" endpoints. \n\n*Note*: For Gradio apps in which [queueing is enabled](https://gradio.app/guides/key-features#queuing), users can bypass the queue if they make a POST request to your API endpoint. To disable this behavior, set `api_open=False` in the `queue()` method. To disable the API page altogether, set `show_api=False` in `.launch()`.\n\n## Authentication\n\nYou may wish to put an authentication page in front of your app to limit who can open your app. With the `auth=` keyword argument in the `launch()` method, you can provide a tuple with a username and password, or a list of acceptable username/password tuples; Here's an example that provides password-based authentication for a single user named \"admin\":\n\n```python\ndemo.launch(auth=(\"admin\", \"pass1234\"))\n```\n\nFor more complex authentication handling, you can even pass a function that takes a username and password as arguments, and returns True to allow authentication, False otherwise. 
This can be used for, among other things, making requests to 3rd-party authentication services.\n\nHere's an example of a function that accepts any login where the username and password are the same:\n\n```python\ndef same_auth(username, password):\n return username == password\ndemo.launch(auth=same_auth)\n```\n\nFor authentication to work properly, third party cookies must be enabled in your browser.\nThis is not the case by default for Safari, Chrome Incognito Mode.\n\n## Accessing the Network Request Directly\n\nWhen a user makes a prediction to your app, you may need the underlying network request, in order to get the request headers (e.g. for advanced authentication), log the client's IP address, or for other reasons. Gradio supports this in a similar manner to FastAPI: simply add a function parameter whose type hint is `gr.Request` and Gradio will pass in the network request as that parameter. Here is an example:\n\n```python\nimport gradio as gr\n\ndef echo(name, request: gr.Request):\n if request:\n print(\"Request headers dictionary:\", request.headers)\n print(\"IP address:\", request.client.host)\n return name\n\nio = gr.Interface(echo, \"textbox\", \"textbox\").launch()\n```\n\nNote: if your function is called directly instead of through the UI (this happens, for \nexample, when examples are cached), then `request` will be `None`. You should handle\nthis case explicitly to ensure that your app does not throw any errors. That is why\nwe have the explicit check `if request`.\n\n## Mounting Within Another FastAPI App\n\nIn some cases, you might have an existing FastAPI app, and you'd like to add a path for a Gradio demo.\nYou can easily do this with `gradio.mount_gradio_app()`.\n\nHere's a complete example:\n\n```python\nfrom fastapi import FastAPI\nimport gradio as gr\n\nCUSTOM_PATH = \"/gradio\"\n\napp = FastAPI()\n\n\n@app.get(\"/\")\ndef read_main():\n return {\"message\": \"This is your main app\"}\n\n\nio = gr.Interface(lambda x: \"Hello, \" + x + \"!\", \"textbox\", \"textbox\")\napp = gr.mount_gradio_app(app, io, path=CUSTOM_PATH)\n\n\n# Run this from the terminal as you would normally start a FastAPI app: `uvicorn run:app`\n# and navigate to http://localhost:8000/gradio in your browser.\n\n```\n\nNote that this approach also allows you run your Gradio apps on custom paths (`http://localhost:8000/gradio` in the example above).\n\n## Security and File Access\n\nSharing your Gradio app with others (by hosting it on Spaces, on your own server, or through temporary share links) **exposes** certain files on the host machine to users of your Gradio app. \n\nIn particular, Gradio apps ALLOW users to access to three kinds of files:\n\n* **Files in the same directory (or a subdirectory) of where the Gradio script is launched from.** For example, if the path to your gradio scripts is `/home/usr/scripts/project/app.py` and you launch it from `/home/usr/scripts/project/`, then users of your shared Gradio app will be able to access any files inside `/home/usr/scripts/project/`. This is done so that you can easily reference these files in your Gradio app (e.g. for your app's `examples`).\n\n* **Temporary files created by Gradio.** These are files that are created by Gradio as part of running your prediction function. For example, if your prediction function returns a video file, then Gradio will save that video to a temporary file and then send the path to the temporary file to the front end. 
You can customize the location of temporary files created by Gradio by setting the environment variable `GRADIO_TEMP_DIR` to an absolute path, such as `/home/usr/scripts/project/temp/`.\n\n* **Files that you explicitly allow via the `allowed_paths` parameter in `launch()`**. This parameter allows you to pass in a list of additional directories or exact filepaths you'd like to allow users to have access to. (By default, this parameter is an empty list).\n\nGradio DOES NOT ALLOW access to:\n\n* **Dotfiles** (any files whose name begins with `'.'`) or any files that are contained in any directory whose name begins with `'.'`\n\n* **Files that you explicitly allow via the `blocked_paths` parameter in `launch()`**. You can pass in a list of additional directories or exact filepaths to the `blocked_paths` parameter in `launch()`. This parameter takes precedence over the files that Gradio exposes by default or by the `allowed_paths`.\n\n* **Any other paths on the host machine**. Users should NOT be able to access other arbitrary paths on the host. \n\nPlease make sure you are running the latest version of `gradio` for these security settings to apply. ", "html": "

Sharing Your App

\n\n

How to share your Gradio app:

\n\n
    \n
  1. Sharing demos with the share parameter
  2. \n
  3. Hosting on HF Spaces
  4. \n
  5. Embedding hosted spaces
  6. \n
  7. Embedding with web components
  8. \n
  9. Using the API page
  10. \n
  11. Adding authentication to the page
  12. \n
  13. Accessing Network Requests
  14. \n
  15. Mounting within FastAPI
  16. \n
  17. Security
  18. \n
\n\n

Sharing Demos

\n\n

Gradio demos can be easily shared publicly by setting share=True in the launch() method. Like this:

\n\n
demo.launch(share=True)\n
\n\n

This generates a public, shareable link that you can send to anybody! When you send this link, the user on the other side can try out the model in their browser. Because the processing happens on your device (as long as your device stays on!), you don't have to worry about packaging any dependencies. A share link usually looks something like this: XXXXX.gradio.app. Although the link is served through a Gradio URL, we are only a proxy for your local server, and do not store any data sent through your app.

\n\n

Keep in mind, however, that these links are publicly accessible, meaning that anyone can use your model for prediction! Therefore, make sure not to expose any sensitive information through the functions you write, or allow any critical changes to occur on your device. If you set share=False (the default, except in colab notebooks), only a local link is created, which can be shared by port-forwarding with specific users.

\n\n
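
If you keep share=False but still want specific people to reach your machine, one option (a minimal sketch; the host and port values below are placeholders) is to bind the local server to a known address and port via the server_name and server_port arguments of launch(), and then forward that port to the users you trust:

\n\n
demo.launch(share=False, server_name=\"0.0.0.0\", server_port=7860)\n
\n\n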

\"sharing\"

\n\n

Share links expire after 72 hours.

\n\n

Hosting on HF Spaces

\n\n

If you'd like to have a permanent link to your Gradio demo on the internet, use Hugging Face Spaces. Hugging Face Spaces provides the infrastructure to permanently host your machine learning model for free!

\n\n

After you have created a free Hugging Face account, you have three methods to deploy your Gradio app to Hugging Face Spaces:

\n\n
    \n
  1. From terminal: run gradio deploy in your app directory. The CLI will gather some basic metadata and then launch your app. To update your Space, you can re-run this command or enable the GitHub Actions option to automatically update the Space on git push.

  2. \n
  3. From your browser: Drag and drop a folder containing your Gradio model and all related files here.

  4. \n
  5. Connect Spaces with your Git repository and Spaces will pull the Gradio app from there. See this guide on how to host on Hugging Face Spaces for more information.

  6. \n
\n\n

\n\n

Note: Some components, like gr.Image, will display a \"Share\" button only on Spaces, so that users can share the generated output to the Discussions page of the Space easily. You can disable this with show_share_button, such as gr.Image(show_share_button=False).

\n\n

\"Imagesharebutton=True\" />

\n\n

Embedding Hosted Spaces

\n\n

Once you have hosted your app on Hugging Face Spaces (or on your own server), you may want to embed the demo on a different website, such as your blog or your portfolio. Embedding an interactive demo allows people to try out the machine learning model that you have built, without needing to download or install anything \u2014 right in their browser! The best part is that you can embed interactive demos even in static websites, such as GitHub pages.

\n\n

There are two ways to embed your Gradio demos. You can find quick links to both options directly on the Hugging Face Space page, in the \"Embed this Space\" dropdown option:

\n\n

\"Embed

\n\n

Embedding with Web Components

\n\n

Web components typically offer a better experience to users than IFrames. Web components load lazily, meaning that they won't slow down the loading time of your website, and they automatically adjust their height based on the size of the Gradio app.

\n\n

To embed with Web Components:

\n\n
    \n
  1. Import the gradio JS library into your site by adding the script below (replace {GRADIO_VERSION} in the URL with the library version of Gradio you are using).
  2. \n
\n\n
\n
\n\n
    \n
  1. Add
  2. \n
\n\n
\n
\n\n

element where you want to place the app. Set the src= attribute to your Space's embed URL, which you can find in the \"Embed this Space\" button. For example:

\n\n
\n
\n\n\n\n

You can see examples of how web components look on the Gradio landing page.

\n\n

You can also customize the appearance and behavior of your web component with attributes that you pass into the <gradio-app> tag:

\n\n
    \n
  • src: as we've seen, the src attribute links to the URL of the hosted Gradio demo that you would like to embed
  • \n
  • space: an optional shorthand if your Gradio demo is hosted on Hugging Face Spaces. Accepts a username/space_name instead of a full URL. Example: gradio/Echocardiogram-Segmentation. If this attribute is provided, then src does not need to be provided.
  • \n
  • control_page_title: a boolean designating whether the html title of the page should be set to the title of the Gradio app (by default \"false\")
  • \n
  • initial_height: the initial height of the web component while it is loading the Gradio app, (by default \"300px\"). Note that the final height is set based on the size of the Gradio app.
  • \n
  • container: whether to show the border frame and information about where the Space is hosted (by default \"true\")
  • \n
  • info: whether to show just the information about where the Space is hosted underneath the embedded app (by default \"true\")
  • \n
  • autoscroll: whether to autoscroll to the output when prediction has finished (by default \"false\")
  • \n
  • eager: whether to load the Gradio app as soon as the page loads (by default \"false\")
  • \n
  • theme_mode: whether to use the dark, light, or default system theme mode (by default \"system\")
  • \n
\n\n

Here's an example of how to use these attributes to create a Gradio app that does not lazy load and has an initial height of 0px.

\n\n
\n
\n\n

Note: While Gradio's CSS will never impact the embedding page, the embedding page can affect the style of the embedded Gradio app. Make sure that any CSS in the parent page isn't so general that it could also apply to the embedded Gradio app and cause the styling to break. Element selectors such as header { ... } and footer { ... } will be the most likely to cause issues.

\n\n

Embedding with IFrames

\n\n

To embed with IFrames instead (if you cannot add javascript to your website, for example), add this element:

\n\n
\n
\n\n

Again, set the src= attribute to your Space's embed URL, which you can find in the \"Embed this Space\" button.

\n\n

Note: if you use IFrames, you'll probably want to add a fixed height attribute and set style=\"border:0;\" to remove the border. In addition, if your app requires permissions such as access to the webcam or the microphone, you'll need to provide that as well using the allow attribute.

\n\n

API Page

\n\n

You can use almost any Gradio app as an API! In the footer of a Gradio app like this one, you'll see a \"Use via API\" link.

\n\n

\"Use

\n\n

This is a page that lists the endpoints that can be used to query the Gradio app, via our supported clients: either the Python client, or the JavaScript client. For each endpoint, Gradio automatically generates the parameters and their types, as well as example inputs.

\n\n

The endpoints are automatically created when you launch a Gradio Interface. If you are using Gradio Blocks, you can also set up a Gradio API page, though we recommend that you explicitly name each event listener, such as

\n\n
btn.click(add, [num1, num2], output, api_name=\"addition\")\n
\n\n

This will add and document the endpoint /api/addition/ to the automatically generated API page. Otherwise, your API endpoints will appear as \"unnamed\" endpoints.

\n\n
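
Once an endpoint has been named this way, it can be called programmatically with the Python client. Here is a minimal sketch (the local URL and the two numbers are placeholders, and gradio_client must be installed separately):

\n\n
from gradio_client import Client\n\nclient = Client(\"http://localhost:7860/\")  # or the URL of your hosted app or Space\nresult = client.predict(3, 5, api_name=\"/addition\")\nprint(result)\n
\n\n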

Note: For Gradio apps in which queueing is enabled, users can bypass the queue if they make a POST request to your API endpoint. To disable this behavior, set api_open=False in the queue() method. To disable the API page altogether, set show_api=False in .launch().

\n\n
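
As a minimal sketch of those two settings together (the identity function here is only a placeholder):

\n\n
demo = gr.Interface(lambda x: x, \"textbox\", \"textbox\")\ndemo.queue(api_open=False)   # keep the queue, but don't let raw POST requests skip it\ndemo.launch(show_api=False)  # hide the \"Use via API\" page entirely\n
\n\n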

Authentication

\n\n

You may wish to put an authentication page in front of your app to limit who can open your app. With the auth= keyword argument in the launch() method, you can provide a tuple with a username and password, or a list of acceptable username/password tuples. Here's an example that provides password-based authentication for a single user named \"admin\":

\n\n
demo.launch(auth=(\"admin\", \"pass1234\"))\n
\n\n

For more complex authentication handling, you can even pass a function that takes a username and password as arguments, and returns True to allow authentication, False otherwise. This can be used for, among other things, making requests to 3rd-party authentication services.

\n\n

Here's an example of a function that accepts any login where the username and password are the same:

\n\n
def same_auth(username, password):\n    return username == password\ndemo.launch(auth=same_auth)\n
\n\n

For authentication to work properly, third-party cookies must be enabled in your browser.\nThis is not the case by default for Safari or for Chrome in Incognito Mode.

\n\n

Accessing the Network Request Directly

\n\n

When a user makes a prediction to your app, you may need the underlying network request, in order to get the request headers (e.g. for advanced authentication), log the client's IP address, or for other reasons. Gradio supports this in a similar manner to FastAPI: simply add a function parameter whose type hint is gr.Request and Gradio will pass in the network request as that parameter. Here is an example:

\n\n
import gradio as gr\n\ndef echo(name, request: gr.Request):\n    if request:\n        print(\"Request headers dictionary:\", request.headers)\n        print(\"IP address:\", request.client.host)\n    return name\n\nio = gr.Interface(echo, \"textbox\", \"textbox\").launch()\n
\n\n

Note: if your function is called directly instead of through the UI (this happens, for \nexample, when examples are cached), then request will be None. You should handle\nthis case explicitly to ensure that your app does not throw any errors. That is why\nwe have the explicit check if request.

\n\n

Mounting Within Another FastAPI App

\n\n

In some cases, you might have an existing FastAPI app, and you'd like to add a path for a Gradio demo.\nYou can easily do this with gradio.mount_gradio_app().

\n\n

Here's a complete example:

\n\n
from fastapi import FastAPI\nimport gradio as gr\n\nCUSTOM_PATH = \"/gradio\"\n\napp = FastAPI()\n\n\n@app.get(\"/\")\ndef read_main():\n    return {\"message\": \"This is your main app\"}\n\n\nio = gr.Interface(lambda x: \"Hello, \" + x + \"!\", \"textbox\", \"textbox\")\napp = gr.mount_gradio_app(app, io, path=CUSTOM_PATH)\n\n\n# Run this from the terminal as you would normally start a FastAPI app: `uvicorn run:app`\n# and navigate to http://localhost:8000/gradio in your browser.\n\n
\n\n

Note that this approach also allows you to run your Gradio apps on custom paths (http://localhost:8000/gradio in the example above).

\n\n

Security and File Access

\n\n

Sharing your Gradio app with others (by hosting it on Spaces, on your own server, or through temporary share links) exposes certain files on the host machine to users of your Gradio app.

\n\n

In particular, Gradio apps ALLOW users to access three kinds of files:

\n\n
    \n
  • Files in the same directory (or a subdirectory) of where the Gradio script is launched from. For example, if the path to your gradio scripts is /home/usr/scripts/project/app.py and you launch it from /home/usr/scripts/project/, then users of your shared Gradio app will be able to access any files inside /home/usr/scripts/project/. This is done so that you can easily reference these files in your Gradio app (e.g. for your app's examples).

  • \n
  • Temporary files created by Gradio. These are files that are created by Gradio as part of running your prediction function. For example, if your prediction function returns a video file, then Gradio will save that video to a temporary file and then send the path to the temporary file to the front end. You can customize the location of temporary files created by Gradio by setting the environment variable GRADIO_TEMP_DIR to an absolute path, such as /home/usr/scripts/project/temp/.

  • \n
  • Files that you explicitly allow via the allowed_paths parameter in launch(). This parameter allows you to pass in a list of additional directories or exact filepaths you'd like to allow users to have access to. (By default, this parameter is an empty list).

  • \n
\n\n

Gradio DOES NOT ALLOW access to:

\n\n
    \n
  • Dotfiles (any files whose name begins with '.') or any files that are contained in any directory whose name begins with '.'

  • \n
  • Files that you explicitly block via the blocked_paths parameter in launch(). You can pass in a list of additional directories or exact filepaths to the blocked_paths parameter in launch(). This parameter takes precedence over the files that Gradio exposes by default or by the allowed_paths (see the sketch after this list).

  • \n
  • Any other paths on the host machine. Users should NOT be able to access other arbitrary paths on the host.

  • \n
\n\n
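
As a sketch of how the allowed_paths and blocked_paths parameters fit together (the directory paths below are hypothetical):

\n\n
demo.launch(\n    allowed_paths=[\"/home/usr/datasets/thumbnails\"],          # extra files users may read\n    blocked_paths=[\"/home/usr/datasets/thumbnails/private\"],  # always takes precedence over allowed_paths\n)\n
\n\n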

Please make sure you are running the latest version of gradio for these security settings to apply.

\n", "tags": [], "spaces": [], "url": "/guides/sharing-your-app/", "contributor": null}]}, {"category": "Building Interfaces", "guides": [{"name": "interface-state", "category": "building-interfaces", "pretty_category": "Building Interfaces", "guide_index": 1, "absolute_index": 3, "pretty_name": "Interface State", "content": "# Interface State\n\nThis guide covers how State is handled in Gradio. Learn the difference between Global and Session states, and how to use both.\n\n## Global State\n\nYour function may use data that persists beyond a single function call. If the data is something accessible to all function calls and all users, you can create a variable outside the function call and access it inside the function. For example, you may load a large model outside the function and use it inside the function so that every function call does not need to reload the model. \n\n```python\nimport gradio as gr\n\nscores = []\n\ndef track_score(score):\n scores.append(score)\n top_scores = sorted(scores, reverse=True)[:3]\n return top_scores\n\ndemo = gr.Interface(\n track_score, \n gr.Number(label=\"Score\"), \n gr.JSON(label=\"Top Scores\")\n)\ndemo.launch()\n```\n\nIn the code above, the `scores` array is shared between all users. If multiple users are accessing this demo, their scores will all be added to the same list, and the returned top 3 scores will be collected from this shared reference. \n\n## Session State\n\nAnother type of data persistence Gradio supports is session **state**, where data persists across multiple submits within a page session. However, data is *not* shared between different users of your model. To store data in a session state, you need to do three things:\n\n1. Pass in an extra parameter into your function, which represents the state of the interface.\n2. At the end of the function, return the updated value of the state as an extra return value.\n3. Add the `'state'` input and `'state'` output components when creating your `Interface`\n\nA chatbot is an example where you would need session state - you want access to a users previous submissions, but you cannot store chat history in a global variable, because then chat history would get jumbled between different users. 
\n\n```python\nimport gradio as gr\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\nimport torch\n\ntokenizer = AutoTokenizer.from_pretrained(\"microsoft/DialoGPT-medium\")\nmodel = AutoModelForCausalLM.from_pretrained(\"microsoft/DialoGPT-medium\")\n\n\ndef user(message, history):\n return \"\", history + [[message, None]]\n\n\ndef bot(history):\n user_message = history[-1][0]\n new_user_input_ids = tokenizer.encode(\n user_message + tokenizer.eos_token, return_tensors=\"pt\"\n )\n\n # append the new user input tokens to the chat history\n bot_input_ids = torch.cat([torch.LongTensor([]), new_user_input_ids], dim=-1)\n\n # generate a response\n response = model.generate(\n bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id\n ).tolist()\n\n # convert the tokens to text, and then split the responses into lines\n response = tokenizer.decode(response[0]).split(\"<|endoftext|>\")\n response = [\n (response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)\n ] # convert to tuples of list\n history[-1] = response[0]\n return history\n\n\nwith gr.Blocks() as demo:\n chatbot = gr.Chatbot()\n msg = gr.Textbox()\n clear = gr.Button(\"Clear\")\n\n msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(\n bot, chatbot, chatbot\n )\n clear.click(lambda: None, None, chatbot, queue=False)\n\ndemo.launch()\n\n```\n\n\nNotice how the state persists across submits within each page, but if you load this demo in another tab (or refresh the page), the demos will not share chat history. \n\nThe default value of `state` is None. If you pass a default value to the state parameter of the function, it is used as the default value of the state instead. The `Interface` class only supports a single input and outputs state variable, though it can be a list with multiple elements. For more complex use cases, you can use Blocks, [which supports multiple `State` variables](/guides/state-in-blocks/).", "html": "

Interface State

\n\n

This guide covers how State is handled in Gradio. Learn the difference between Global and Session states, and how to use both.

\n\n

Global State

\n\n

Your function may use data that persists beyond a single function call. If the data is something accessible to all function calls and all users, you can create a variable outside the function call and access it inside the function. For example, you may load a large model outside the function and use it inside the function so that every function call does not need to reload the model.

\n\n
import gradio as gr\n\nscores = []\n\ndef track_score(score):\n    scores.append(score)\n    top_scores = sorted(scores, reverse=True)[:3]\n    return top_scores\n\ndemo = gr.Interface(\n    track_score, \n    gr.Number(label=\"Score\"), \n    gr.JSON(label=\"Top Scores\")\n)\ndemo.launch()\n
\n\n

In the code above, the scores array is shared between all users. If multiple users are accessing this demo, their scores will all be added to the same list, and the returned top 3 scores will be collected from this shared reference.

\n\n

Session State

\n\n

Another type of data persistence Gradio supports is session state, where data persists across multiple submits within a page session. However, data is not shared between different users of your model. To store data in a session state, you need to do three things:

\n\n
    \n
  1. Pass in an extra parameter into your function, which represents the state of the interface.
  2. \n
  3. At the end of the function, return the updated value of the state as an extra return value.
  4. \n
  5. Add the 'state' input and 'state' output components when creating your Interface
  6. \n
\n\n

A chatbot is an example where you would need session state - you want access to a user's previous submissions, but you cannot store chat history in a global variable, because then chat history would get jumbled between different users.

\n\n
import gradio as gr\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\nimport torch\n\ntokenizer = AutoTokenizer.from_pretrained(\"microsoft/DialoGPT-medium\")\nmodel = AutoModelForCausalLM.from_pretrained(\"microsoft/DialoGPT-medium\")\n\n\ndef user(message, history):\n    return \"\", history + [[message, None]]\n\n\ndef bot(history):\n    user_message = history[-1][0]\n    new_user_input_ids = tokenizer.encode(\n        user_message + tokenizer.eos_token, return_tensors=\"pt\"\n    )\n\n    # append the new user input tokens to the chat history\n    bot_input_ids = torch.cat([torch.LongTensor([]), new_user_input_ids], dim=-1)\n\n    # generate a response\n    response = model.generate(\n        bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id\n    ).tolist()\n\n    # convert the tokens to text, and then split the responses into lines\n    response = tokenizer.decode(response[0]).split(\"<|endoftext|>\")\n    response = [\n        (response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)\n    ]  # convert to tuples of list\n    history[-1] = response[0]\n    return history\n\n\nwith gr.Blocks() as demo:\n    chatbot = gr.Chatbot()\n    msg = gr.Textbox()\n    clear = gr.Button(\"Clear\")\n\n    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(\n        bot, chatbot, chatbot\n    )\n    clear.click(lambda: None, None, chatbot, queue=False)\n\ndemo.launch()\n\n
\n\n

\n\n

Notice how the state persists across submits within each page, but if you load this demo in another tab (or refresh the page), the demos will not share chat history.

\n\n

The default value of state is None. If you pass a default value to the state parameter of the function, it is used as the default value of the state instead. The Interface class only supports a single input state variable and a single output state variable, though the value can be a list with multiple elements. For more complex use cases, you can use Blocks, which supports multiple State variables.
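
\n\n

As a minimal sketch of session state with Interface itself, using the \"state\" shortcut components (the function, components, and example behavior here are illustrative, not taken from the guide):

\n\n
import gradio as gr\n\ndef store_message(message, history):\n    history = history or []     # the state arrives as None on the first call\n    history.append(message)\n    return history, history     # first value is displayed, second one updates the state\n\ndemo = gr.Interface(store_message, [\"textbox\", \"state\"], [\"json\", \"state\"])\ndemo.launch()\n
\n\n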

\n", "tags": [], "spaces": [], "url": "/guides/interface-state/", "contributor": null}, {"name": "reactive-interfaces", "category": "building-interfaces", "pretty_category": "Building Interfaces", "guide_index": 2, "absolute_index": 4, "pretty_name": "Reactive Interfaces", "content": "# Reactive Interfaces\n\nThis guide covers how to get Gradio interfaces to refresh automatically or continuously stream data.\n\n## Live Interfaces\n\nYou can make interfaces automatically refresh by setting `live=True` in the interface. Now the interface will recalculate as soon as the user input changes.\n\n```python\nimport gradio as gr\n\ndef calculator(num1, operation, num2):\n if operation == \"add\":\n return num1 + num2\n elif operation == \"subtract\":\n return num1 - num2\n elif operation == \"multiply\":\n return num1 * num2\n elif operation == \"divide\":\n return num1 / num2\n\ndemo = gr.Interface(\n calculator,\n [\n \"number\",\n gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n \"number\"\n ],\n \"number\",\n live=True,\n)\ndemo.launch()\n\n```\n\n\nNote there is no submit button, because the interface resubmits automatically on change.\n\n## Streaming Components\n\nSome components have a \"streaming\" mode, such as `Audio` component in microphone mode, or the `Image` component in webcam mode. Streaming means data is sent continuously to the backend and the `Interface` function is continuously being rerun. \n\nThe difference between `gr.Audio(source='microphone')` and `gr.Audio(source='microphone', streaming=True)`, when both are used in `gr.Interface(live=True)`, is that the first `Component` will automatically submit data and run the `Interface` function when the user stops recording, whereas the second `Component` will continuously send data and run the `Interface` function *during* recording.\n\nHere is example code of streaming images from the webcam.\n\n```python\nimport gradio as gr\nimport numpy as np\n\ndef flip(im):\n return np.flipud(im)\n\ndemo = gr.Interface(\n flip, \n gr.Image(source=\"webcam\", streaming=True), \n \"image\",\n live=True\n)\ndemo.launch()\n \n```", "html": "

Reactive Interfaces

\n\n

This guide covers how to get Gradio interfaces to refresh automatically or continuously stream data.

\n\n

Live Interfaces

\n\n

You can make interfaces automatically refresh by setting live=True in the interface. Now the interface will recalculate as soon as the user input changes.

\n\n
import gradio as gr\n\ndef calculator(num1, operation, num2):\n    if operation == \"add\":\n        return num1 + num2\n    elif operation == \"subtract\":\n        return num1 - num2\n    elif operation == \"multiply\":\n        return num1 * num2\n    elif operation == \"divide\":\n        return num1 / num2\n\ndemo = gr.Interface(\n    calculator,\n    [\n        \"number\",\n        gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n        \"number\"\n    ],\n    \"number\",\n    live=True,\n)\ndemo.launch()\n\n
\n\n

\n\n

Note there is no submit button, because the interface resubmits automatically on change.

\n\n

Streaming Components

\n\n

Some components have a \"streaming\" mode, such as the Audio component in microphone mode, or the Image component in webcam mode. Streaming means data is sent continuously to the backend and the Interface function is continuously being rerun.

\n\n

The difference between gr.Audio(source='microphone') and gr.Audio(source='microphone', streaming=True), when both are used in gr.Interface(live=True), is that the first Component will automatically submit data and run the Interface function when the user stops recording, whereas the second Component will continuously send data and run the Interface function during recording.

\n\n

Here is example code of streaming images from the webcam.

\n\n
import gradio as gr\nimport numpy as np\n\ndef flip(im):\n    return np.flipud(im)\n\ndemo = gr.Interface(\n    flip, \n    gr.Image(source=\"webcam\", streaming=True), \n    \"image\",\n    live=True\n)\ndemo.launch()\n\n
\n", "tags": [], "spaces": [], "url": "/guides/reactive-interfaces/", "contributor": null}, {"name": "more-on-examples", "category": "building-interfaces", "pretty_category": "Building Interfaces", "guide_index": 3, "absolute_index": 5, "pretty_name": "More On Examples", "content": "# More on Examples\n\nThis guide covers what more you can do with Examples: Loading examples from a directory, providing partial examples, and caching. If Examples is new to you, check out the intro in the [Key Features](/guides/key-features/#example-inputs) guide. \n\n## Providing Examples\n\nAs covered in the [Key Features](/guides/key-features/#example-inputs) guide, adding examples to an Interface is as easy as providing a list of lists to the `examples`\nkeyword argument. \nEach sublist is a data sample, where each element corresponds to an input of the prediction function.\nThe inputs must be ordered in the same order as the prediction function expects them.\n\nIf your interface only has one input component, then you can provide your examples as a regular list instead of a list of lists.\n\n### Loading Examples from a Directory\n\nYou can also specify a path to a directory containing your examples. If your Interface takes only a single file-type input, e.g. an image classifier, you can simply pass a directory filepath to the `examples=` argument, and the `Interface` will load the images in the directory as examples. \nIn the case of multiple inputs, this directory must\ncontain a log.csv file with the example values.\nIn the context of the calculator demo, we can set `examples='/demo/calculator/examples'` and in that directory we include the following `log.csv` file:\n```csv\nnum,operation,num2\n5,\"add\",3\n4,\"divide\",2\n5,\"multiply\",3\n```\n\nThis can be helpful when browsing flagged data. Simply point to the flagged directory and the `Interface` will load the examples from the flagged data.\n\n### Providing Partial Examples\n\nSometimes your app has many input components, but you would only like to provide examples for a subset of them. In order to exclude some inputs from the examples, pass `None` for all data samples corresponding to those particular components.\n\n## Caching examples\n\nYou may wish to provide some cached examples of your model for users to quickly try out, in case your model takes a while to run normally.\nIf `cache_examples=True`, the `Interface` will run all of your examples through your app and save the outputs when you call the `launch()` method. This data will be saved in a directory called `gradio_cached_examples`. \n\nWhenever a user clicks on an example, the output will automatically be populated in the app now, using data from this cached directory instead of actually running the function. This is useful so users can quickly try out your model without adding any load! \n\nKeep in mind once the cache is generated, it will not be updated in future launches. If the examples or function logic change, delete the cache folder to clear the cache and rebuild it with another `launch()`.\n\n", "html": "

More on Examples

\n\n

This guide covers what more you can do with Examples: Loading examples from a directory, providing partial examples, and caching. If Examples is new to you, check out the intro in the Key Features guide.

\n\n

Providing Examples

\n\n

As covered in the Key Features guide, adding examples to an Interface is as easy as providing a list of lists to the examples\nkeyword argument. \nEach sublist is a data sample, where each element corresponds to an input of the prediction function.\nThe inputs must be ordered in the same order as the prediction function expects them.

\n\n

If your interface only has one input component, then you can provide your examples as a regular list instead of a list of lists.

\n\n
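
For instance, a single-input demo can pass its examples as a flat list (a minimal sketch; the function and the example values are placeholders):

\n\n
import gradio as gr\n\ndef greet(name):\n    return \"Hello \" + name + \"!\"\n\ndemo = gr.Interface(greet, \"textbox\", \"textbox\", examples=[\"Alice\", \"Bob\", \"Charlie\"])\ndemo.launch()\n
\n\n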

Loading Examples from a Directory

\n\n

You can also specify a path to a directory containing your examples. If your Interface takes only a single file-type input, e.g. an image classifier, you can simply pass a directory filepath to the examples= argument, and the Interface will load the images in the directory as examples. \nIn the case of multiple inputs, this directory must\ncontain a log.csv file with the example values.\nIn the context of the calculator demo, we can set examples='/demo/calculator/examples' and in that directory we include the following log.csv file:

\n\n
num,operation,num2\n5,\"add\",3\n4,\"divide\",2\n5,\"multiply\",3\n
\n\n

This can be helpful when browsing flagged data. Simply point to the flagged directory and the Interface will load the examples from the flagged data.

\n\n

Providing Partial Examples

\n\n

Sometimes your app has many input components, but you would only like to provide examples for a subset of them. In order to exclude some inputs from the examples, pass None for all data samples corresponding to those particular components.

\n\n
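
For example, here is a sketch of an examples list for a two-input demo in which only the first input receives example values (the filenames are hypothetical):

\n\n
examples = [\n    [\"lion.jpg\", None],   # None excludes the second input from the examples\n    [\"tiger.jpg\", None],\n]\n
\n\n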

Caching examples

\n\n

You may wish to provide some cached examples of your model for users to quickly try out, in case your model takes a while to run normally.\nIf cache_examples=True, the Interface will run all of your examples through your app and save the outputs when you call the launch() method. This data will be saved in a directory called gradio_cached_examples.

\n\n
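
A minimal sketch of what this looks like (the predict function and the image filenames are placeholders):

\n\n
demo = gr.Interface(\n    predict,                # your prediction function (placeholder)\n    \"image\",\n    \"label\",\n    examples=[\"lion.jpg\", \"tiger.jpg\"],\n    cache_examples=True,    # examples are run once during launch() and saved to gradio_cached_examples\n)\ndemo.launch()\n
\n\n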

Whenever a user clicks on an example, the output will automatically be populated in the app now, using data from this cached directory instead of actually running the function. This is useful so users can quickly try out your model without adding any load!

\n\n

Keep in mind once the cache is generated, it will not be updated in future launches. If the examples or function logic change, delete the cache folder to clear the cache and rebuild it with another launch().

\n", "tags": [], "spaces": [], "url": "/guides/more-on-examples/", "contributor": null}, {"name": "advanced-interface-features", "category": "building-interfaces", "pretty_category": "Building Interfaces", "guide_index": 4, "absolute_index": 6, "pretty_name": "Advanced Interface Features", "content": "# Advanced Interface Features\n\nThere's more to cover on the [Interface](https://gradio.app/docs#interface) class. This guide covers all the advanced features: Using [Interpretation](https://gradio.app/docs#interpretation), custom styling, loading from the [Hugging Face Hub](https://hf.co), and using [Parallel](https://gradio.app/docs#parallel) and [Series](https://gradio.app/docs#series). \n\n## Interpreting your Predictions\n\nMost models are black boxes such that the internal logic of the function is hidden from the end user. To encourage transparency, we've made it very easy to add interpretation to your model by simply setting the `interpretation` keyword in the `Interface` class to `default`. This allows your users to understand what parts of the input are responsible for the output. Take a look at the simple interface below which shows an image classifier that also includes interpretation:\n\n```python\nimport requests\nimport tensorflow as tf\n\nimport gradio as gr\n\ninception_net = tf.keras.applications.MobileNetV2() # load the model\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\n\ndef classify_image(inp):\n inp = inp.reshape((-1, 224, 224, 3))\n inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)\n prediction = inception_net.predict(inp).flatten()\n return {labels[i]: float(prediction[i]) for i in range(1000)}\n\n\nimage = gr.Image(shape=(224, 224))\nlabel = gr.Label(num_top_classes=3)\n\ndemo = gr.Interface(\n fn=classify_image, inputs=image, outputs=label, interpretation=\"default\"\n)\n\ndemo.launch()\n\n```\n\n\nIn addition to `default`, Gradio also includes [Shapley-based interpretation](https://christophm.github.io/interpretable-ml-book/shap.html), which provides more accurate interpretations, albeit usually with a slower runtime. To use this, simply set the `interpretation` parameter to `\"shap\"` (note: also make sure the python package `shap` is installed). Optionally, you can modify the `num_shap` parameter, which controls the tradeoff between accuracy and runtime (increasing this value generally increases accuracy). Here is an example:\n\n```python\ngr.Interface(fn=classify_image,\n inputs=image, \n outputs=label, \n interpretation=\"shap\", \n num_shap=5).launch()\n```\n\nThis will work for any function, even if internally, the model is a complex neural network or some other black box. If you use Gradio's `default` or `shap` interpretation, the output component must be a `Label`. All common input components are supported. 
Here is an example with text input.\n\n```python\nimport gradio as gr\n\nmale_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n\n\ndef gender_of_sentence(sentence):\n male_count = len([word for word in sentence.split() if word.lower() in male_words])\n female_count = len(\n [word for word in sentence.split() if word.lower() in female_words]\n )\n total = max(male_count + female_count, 1)\n return {\"male\": male_count / total, \"female\": female_count / total}\n\n\ndemo = gr.Interface(\n fn=gender_of_sentence,\n inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n outputs=\"label\",\n interpretation=\"default\",\n)\n\ndemo.launch()\n\n```\n\nSo what is happening under the hood? With these interpretation methods, Gradio runs the prediction multiple times with modified versions of the input. Based on the results, you'll see that the interface automatically highlights the parts of the text (or image, etc.) that contributed increased the likelihood of the class as red. The intensity of color corresponds to the importance of that part of the input. The parts that decrease the class confidence are highlighted blue.\n\nYou can also write your own interpretation function. The demo below adds custom interpretation to the previous demo. This function will take the same inputs as the main wrapped function. The output of this interpretation function will be used to highlight the input of each input component - therefore the function must return a list where the number of elements corresponds to the number of input components. To see the format for interpretation for each input component, check the Docs.\n\n```python\nimport re\n\nimport gradio as gr\n\nmale_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n\n\ndef gender_of_sentence(sentence):\n male_count = len([word for word in sentence.split() if word.lower() in male_words])\n female_count = len(\n [word for word in sentence.split() if word.lower() in female_words]\n )\n total = max(male_count + female_count, 1)\n return {\"male\": male_count / total, \"female\": female_count / total}\n\n\n# Number of arguments to interpretation function must\n# match number of inputs to prediction function\ndef interpret_gender(sentence):\n result = gender_of_sentence(sentence)\n is_male = result[\"male\"] > result[\"female\"]\n interpretation = []\n for word in re.split(\"( )\", sentence):\n score = 0\n token = word.lower()\n if (is_male and token in male_words) or (not is_male and token in female_words):\n score = 1\n elif (is_male and token in female_words) or (\n not is_male and token in male_words\n ):\n score = -1\n interpretation.append((word, score))\n # Output must be a list of lists containing the same number of elements as inputs\n # Each element corresponds to the interpretation scores for the given input\n return [interpretation]\n\n\ndemo = gr.Interface(\n fn=gender_of_sentence,\n inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n outputs=\"label\",\n interpretation=interpret_gender,\n)\n\ndemo.launch()\n\n```\n\nLearn more about Interpretation in the [docs](https://gradio.app/docs#interpretation). 
\n\n## Custom Styling\n\nIf you'd like to have more fine-grained control over any aspect of your demo, you can also write your own css or pass in a filepath to a css file, with the `css` parameter of the `Interface` class.\n\n```python\ngr.Interface(..., css=\"body {background-color: red}\")\n```\n\nIf you'd like to reference external files in your css, preface the file path (which can be a relative or absolute path) with `\"file=\"`, for example:\n\n```python\ngr.Interface(..., css=\"body {background-image: url('file=clouds.jpg')}\")\n```\n\n**Warning**: Custom CSS is *not* guaranteed to work across Gradio versions as the Gradio HTML DOM may change. We recommend using custom CSS sparingly and instead using [Themes](/guides/theming-guide/) whenever possible. \n\n## Loading Hugging Face Models and Spaces\n\nGradio integrates nicely with the [Hugging Face Hub](https://hf.co), allowing you to load models and Spaces with just one line of code. To use this, simply use the `load()` method in the `Interface` class. So:\n\n- To load any model from the Hugging Face Hub and create an interface around it, you pass `\"model/\"` or `\"huggingface/\"` followed by the model name, like these examples:\n\n```python\ngr.Interface.load(\"huggingface/gpt2\").launch();\n```\n\n```python\ngr.Interface.load(\"huggingface/EleutherAI/gpt-j-6B\", \n inputs=gr.Textbox(lines=5, label=\"Input Text\") # customizes the input component\n).launch()\n```\n\n- To load any Space from the Hugging Face Hub and recreate it locally (so that you can customize the inputs and outputs for example), you pass `\"spaces/\"` followed by the model name:\n\n```python\ngr.Interface.load(\"spaces/eugenesiow/remove-bg\", \n inputs=\"webcam\", \n title=\"Remove your webcam background!\").launch()\n```\n\nOne of the great things about loading Hugging Face models or spaces using Gradio is that you can then immediately use the resulting `Interface` object just like function in your Python code (this works for every type of model/space: text, images, audio, video, and even multimodal models):\n\n```python\nio = gr.Interface.load(\"models/EleutherAI/gpt-neo-2.7B\")\nio(\"It was the best of times\") # outputs model completion\n```\n\n## Putting Interfaces in Parallel and Series\n\nGradio also lets you mix interfaces very easily using the `gradio.Parallel` and `gradio.Series` classes. `Parallel` lets you put two similar models (if they have the same input type) in parallel to compare model predictions:\n\n```python\ngenerator1 = gr.Interface.load(\"huggingface/gpt2\")\ngenerator2 = gr.Interface.load(\"huggingface/EleutherAI/gpt-neo-2.7B\")\ngenerator3 = gr.Interface.load(\"huggingface/EleutherAI/gpt-j-6B\")\n\ngr.Parallel(generator1, generator2, generator3).launch()\n```\n\n`Series` lets you put models and spaces in series, piping the output of one model into the input of the next model. \n\n```python\ngenerator = gr.Interface.load(\"huggingface/gpt2\")\ntranslator = gr.Interface.load(\"huggingface/t5-small\")\n\ngr.Series(generator, translator).launch() \n# this demo generates text, then translates it to German, and outputs the final result.\n```\n\nAnd of course, you can also mix `Parallel` and `Series` together whenever that makes sense!\n\nLearn more about Parallel and Series in the [docs](https://gradio.app/docs#parallel). ", "html": "

Advanced Interface Features

\n\n

There's more to cover on the Interface class. This guide covers all the advanced features: Using Interpretation, custom styling, loading from the Hugging Face Hub, and using Parallel and Series.

\n\n

Interpreting your Predictions

\n\n

Most models are black boxes such that the internal logic of the function is hidden from the end user. To encourage transparency, we've made it very easy to add interpretation to your model by simply setting the interpretation keyword in the Interface class to default. This allows your users to understand what parts of the input are responsible for the output. Take a look at the simple interface below which shows an image classifier that also includes interpretation:

\n\n
import requests\nimport tensorflow as tf\n\nimport gradio as gr\n\ninception_net = tf.keras.applications.MobileNetV2()  # load the model\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\n\ndef classify_image(inp):\n    inp = inp.reshape((-1, 224, 224, 3))\n    inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)\n    prediction = inception_net.predict(inp).flatten()\n    return {labels[i]: float(prediction[i]) for i in range(1000)}\n\n\nimage = gr.Image(shape=(224, 224))\nlabel = gr.Label(num_top_classes=3)\n\ndemo = gr.Interface(\n    fn=classify_image, inputs=image, outputs=label, interpretation=\"default\"\n)\n\ndemo.launch()\n\n
\n\n

In addition to default, Gradio also includes Shapley-based interpretation, which provides more accurate interpretations, albeit usually with a slower runtime. To use this, simply set the interpretation parameter to \"shap\" (note: also make sure the python package shap is installed). Optionally, you can modify the num_shap parameter, which controls the tradeoff between accuracy and runtime (increasing this value generally increases accuracy). Here is an example:

\n\n
gr.Interface(fn=classify_image,\n            inputs=image, \n            outputs=label, \n            interpretation=\"shap\", \n            num_shap=5).launch()\n
\n\n

This will work for any function, even if, internally, the model is a complex neural network or some other black box. If you use Gradio's default or shap interpretation, the output component must be a Label. All common input components are supported. Here is an example with text input.

\n\n
import gradio as gr\n\nmale_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n\n\ndef gender_of_sentence(sentence):\n    male_count = len([word for word in sentence.split() if word.lower() in male_words])\n    female_count = len(\n        [word for word in sentence.split() if word.lower() in female_words]\n    )\n    total = max(male_count + female_count, 1)\n    return {\"male\": male_count / total, \"female\": female_count / total}\n\n\ndemo = gr.Interface(\n    fn=gender_of_sentence,\n    inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n    outputs=\"label\",\n    interpretation=\"default\",\n)\n\ndemo.launch()\n\n
\n\n

So what is happening under the hood? With these interpretation methods, Gradio runs the prediction multiple times with modified versions of the input. Based on the results, you'll see that the interface automatically highlights in red the parts of the text (or image, etc.) that increased the likelihood of the predicted class. The intensity of the color corresponds to the importance of that part of the input. The parts that decrease the class confidence are highlighted in blue.

\n\n

You can also write your own interpretation function. The demo below adds custom interpretation to the previous demo. This function will take the same inputs as the main wrapped function. The output of this interpretation function will be used to highlight the input of each input component - therefore the function must return a list where the number of elements corresponds to the number of input components. To see the format for interpretation for each input component, check the Docs.

\n\n
import re\n\nimport gradio as gr\n\nmale_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n\n\ndef gender_of_sentence(sentence):\n    male_count = len([word for word in sentence.split() if word.lower() in male_words])\n    female_count = len(\n        [word for word in sentence.split() if word.lower() in female_words]\n    )\n    total = max(male_count + female_count, 1)\n    return {\"male\": male_count / total, \"female\": female_count / total}\n\n\n# Number of arguments to interpretation function must\n# match number of inputs to prediction function\ndef interpret_gender(sentence):\n    result = gender_of_sentence(sentence)\n    is_male = result[\"male\"] > result[\"female\"]\n    interpretation = []\n    for word in re.split(\"( )\", sentence):\n        score = 0\n        token = word.lower()\n        if (is_male and token in male_words) or (not is_male and token in female_words):\n            score = 1\n        elif (is_male and token in female_words) or (\n            not is_male and token in male_words\n        ):\n            score = -1\n        interpretation.append((word, score))\n    # Output must be a list of lists containing the same number of elements as inputs\n    # Each element corresponds to the interpretation scores for the given input\n    return [interpretation]\n\n\ndemo = gr.Interface(\n    fn=gender_of_sentence,\n    inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n    outputs=\"label\",\n    interpretation=interpret_gender,\n)\n\ndemo.launch()\n\n
\n\n

Learn more about Interpretation in the docs.

\n\n

Custom Styling

\n\n

If you'd like to have more fine-grained control over any aspect of your demo, you can also write your own css or pass in a filepath to a css file, with the css parameter of the Interface class.

\n\n
gr.Interface(..., css=\"body {background-color: red}\")\n
\n\n

If you'd like to reference external files in your css, preface the file path (which can be a relative or absolute path) with \"file=\", for example:

\n\n
gr.Interface(..., css=\"body {background-image: url('file=clouds.jpg')}\")\n
\n\n

Warning: Custom CSS is not guaranteed to work across Gradio versions as the Gradio HTML DOM may change. We recommend using custom CSS sparingly and instead using Themes whenever possible.

\n\n
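
For instance, a prebuilt theme can be applied through the theme parameter of the Interface class (a minimal sketch reusing the classify_image demo from above; gr.themes.Soft() is one of the built-in themes):

\n\n
demo = gr.Interface(fn=classify_image, inputs=image, outputs=label, theme=gr.themes.Soft())\n
\n\n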

Loading Hugging Face Models and Spaces

\n\n

Gradio integrates nicely with the Hugging Face Hub, allowing you to load models and Spaces with just one line of code. To use this, simply use the load() method in the Interface class. So:

\n\n
    \n
  • To load any model from the Hugging Face Hub and create an interface around it, you pass \"model/\" or \"huggingface/\" followed by the model name, like these examples:
  • \n
\n\n
gr.Interface.load(\"huggingface/gpt2\").launch();\n
\n\n
gr.Interface.load(\"huggingface/EleutherAI/gpt-j-6B\", \n    inputs=gr.Textbox(lines=5, label=\"Input Text\")  # customizes the input component\n).launch()\n
\n\n
    \n
  • To load any Space from the Hugging Face Hub and recreate it locally (so that you can customize the inputs and outputs, for example), you pass \"spaces/\" followed by the name of the Space:
  • \n
\n\n
gr.Interface.load(\"spaces/eugenesiow/remove-bg\", \n                  inputs=\"webcam\", \n                  title=\"Remove your webcam background!\").launch()\n
\n\n

One of the great things about loading Hugging Face models or spaces using Gradio is that you can then immediately use the resulting Interface object just like a function in your Python code (this works for every type of model/space: text, images, audio, video, and even multimodal models):

\n\n
io = gr.Interface.load(\"models/EleutherAI/gpt-neo-2.7B\")\nio(\"It was the best of times\")  # outputs model completion\n
\n\n

Putting Interfaces in Parallel and Series

\n\n

Gradio also lets you mix interfaces very easily using the gradio.Parallel and gradio.Series classes. Parallel lets you put two similar models (if they have the same input type) in parallel to compare model predictions:

\n\n
generator1 = gr.Interface.load(\"huggingface/gpt2\")\ngenerator2 = gr.Interface.load(\"huggingface/EleutherAI/gpt-neo-2.7B\")\ngenerator3 = gr.Interface.load(\"huggingface/EleutherAI/gpt-j-6B\")\n\ngr.Parallel(generator1, generator2, generator3).launch()\n
\n\n

Series lets you put models and spaces in series, piping the output of one model into the input of the next model.

\n\n
generator = gr.Interface.load(\"huggingface/gpt2\")\ntranslator = gr.Interface.load(\"huggingface/t5-small\")\n\ngr.Series(generator, translator).launch()  \n# this demo generates text, then translates it to German, and outputs the final result.\n
\n\n

And of course, you can also mix Parallel and Series together whenever that makes sense!

\n\n

Learn more about Parallel and Series in the docs.

\n", "tags": [], "spaces": [], "url": "/guides/advanced-interface-features/", "contributor": null}, {"name": "four-kinds-of-interfaces", "category": "building-interfaces", "pretty_category": "Building Interfaces", "guide_index": 5, "absolute_index": 7, "pretty_name": "Four Kinds Of Interfaces", "content": "# The 4 Kinds of Gradio Interfaces\n\nSo far, we've always assumed that in order to build an Gradio demo, you need both inputs and outputs. But this isn't always the case for machine learning demos: for example, *unconditional image generation models* don't take any input but produce an image as the output.\n\nIt turns out that the `gradio.Interface` class can actually handle 4 different kinds of demos:\n\n1. **Standard demos**: which have both separate inputs and outputs (e.g. an image classifier or speech-to-text model)\n2. **Output-only demos**: which don't take any input but produce on output (e.g. an unconditional image generation model)\n3. **Input-only demos**: which don't produce any output but do take in some sort of input (e.g. a demo that saves images that you upload to a persistent external database)\n4. **Unified demos**: which have both input and output components, but the input and output components *are the same*. This means that the output produced overrides the input (e.g. a text autocomplete model)\n\nDepending on the kind of demo, the user interface (UI) looks slightly different:\n\n![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/gradio-guides/interfaces4.png)\n\n\nLet's see how to build each kind of demo using the `Interface` class, along with examples:\n\n\n## Standard demos\n\nTo create a demo that has both the input and the output components, you simply need to set the values of the `inputs` and `outputs` parameter in `Interface()`. Here's an example demo of a simple image filter:\n\n```python\nimport numpy as np\nimport gradio as gr\n\ndef sepia(input_img):\n sepia_filter = np.array([\n [0.393, 0.769, 0.189], \n [0.349, 0.686, 0.168], \n [0.272, 0.534, 0.131]\n ])\n sepia_img = input_img.dot(sepia_filter.T)\n sepia_img /= sepia_img.max()\n return sepia_img\n\ndemo = gr.Interface(sepia, gr.Image(shape=(200, 200)), \"image\")\ndemo.launch()\n\n```\n\n\n\n## Output-only demos\n\nWhat about demos that only contain outputs? In order to build such a demo, you simply set the value of the `inputs` parameter in `Interface()` to `None`. Here's an example demo of a mock image generation model:\n\n```python\nimport time\n\nimport gradio as gr\n\n\ndef fake_gan():\n time.sleep(1)\n images = [\n \"https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n \"https://images.unsplash.com/photo-1554151228-14d9def656e4?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=386&q=80\",\n \"https://images.unsplash.com/photo-1542909168-82c3e7fdca5c?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxzZWFyY2h8MXx8aHVtYW4lMjBmYWNlfGVufDB8fDB8fA%3D%3D&w=1000&q=80\",\n ]\n return images\n\n\ndemo = gr.Interface(\n fn=fake_gan,\n inputs=None,\n outputs=gr.Gallery(label=\"Generated Images\").style(grid=[2]),\n title=\"FD-GAN\",\n description=\"This is a fake demo of a GAN. In reality, the images are randomly chosen from Unsplash.\",\n)\n\ndemo.launch()\n\n```\n\n\n## Input-only demos\n\nSimilarly, to create a demo that only contains inputs, set the value of `outputs` parameter in `Interface()` to be `None`. 
Here's an example demo that saves any uploaded image to disk:\n\n```python\nimport random\nimport string\nimport gradio as gr \n\ndef save_image_random_name(image):\n random_string = ''.join(random.choices(string.ascii_letters, k=20)) + '.png'\n image.save(random_string)\n print(f\"Saved image to {random_string}!\")\n\ndemo = gr.Interface(\n fn=save_image_random_name, \n inputs=gr.Image(type=\"pil\"), \n outputs=None,\n)\ndemo.launch()\n```\n\n\n## Unified demos\n\nA demo that has a single component as both the input and the output. It can simply be created by setting the values of the `inputs` and `outputs` parameter as the same component. Here's an example demo of a text generation model:\n\n```python\nimport gradio as gr\nfrom transformers import pipeline\n\ngenerator = pipeline('text-generation', model = 'gpt2')\n\ndef generate_text(text_prompt):\n response = generator(text_prompt, max_length = 30, num_return_sequences=5)\n return response[0]['generated_text']\n\ntextbox = gr.Textbox()\n\ndemo = gr.Interface(generate_text, textbox, textbox)\n\ndemo.launch()\n\n```\n\n", "html": "

The 4 Kinds of Gradio Interfaces

\n\n

So far, we've always assumed that in order to build a Gradio demo, you need both inputs and outputs. But this isn't always the case for machine learning demos: for example, unconditional image generation models don't take any input but produce an image as the output.

\n\n

It turns out that the gradio.Interface class can actually handle 4 different kinds of demos:

\n\n
    \n
  1. Standard demos: which have both separate inputs and outputs (e.g. an image classifier or speech-to-text model)
  2. \n
  3. Output-only demos: which don't take any input but produce an output (e.g. an unconditional image generation model)
  4. \n
  5. Input-only demos: which don't produce any output but do take in some sort of input (e.g. a demo that saves images that you upload to a persistent external database)
  6. \n
  7. Unified demos: which have both input and output components, but the input and output components are the same. This means that the output produced overrides the input (e.g. a text autocomplete model)
  8. \n
\n\n

Depending on the kind of demo, the user interface (UI) looks slightly different:

\n\n

\"\"

\n\n

Let's see how to build each kind of demo using the Interface class, along with examples:

\n\n

Standard demos

\n\n

To create a demo that has both the input and the output components, you simply need to set the values of the inputs and outputs parameter in Interface(). Here's an example demo of a simple image filter:

\n\n
import numpy as np\nimport gradio as gr\n\ndef sepia(input_img):\n    sepia_filter = np.array([\n        [0.393, 0.769, 0.189], \n        [0.349, 0.686, 0.168], \n        [0.272, 0.534, 0.131]\n    ])\n    sepia_img = input_img.dot(sepia_filter.T)\n    sepia_img /= sepia_img.max()\n    return sepia_img\n\ndemo = gr.Interface(sepia, gr.Image(shape=(200, 200)), \"image\")\ndemo.launch()\n\n
\n\n

\n\n

Output-only demos

\n\n

What about demos that only contain outputs? In order to build such a demo, you simply set the value of the inputs parameter in Interface() to None. Here's an example demo of a mock image generation model:

\n\n
import time\n\nimport gradio as gr\n\n\ndef fake_gan():\n    time.sleep(1)\n    images = [\n            \"https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n            \"https://images.unsplash.com/photo-1554151228-14d9def656e4?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=386&q=80\",\n            \"https://images.unsplash.com/photo-1542909168-82c3e7fdca5c?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxzZWFyY2h8MXx8aHVtYW4lMjBmYWNlfGVufDB8fDB8fA%3D%3D&w=1000&q=80\",\n    ]\n    return images\n\n\ndemo = gr.Interface(\n    fn=fake_gan,\n    inputs=None,\n    outputs=gr.Gallery(label=\"Generated Images\").style(grid=[2]),\n    title=\"FD-GAN\",\n    description=\"This is a fake demo of a GAN. In reality, the images are randomly chosen from Unsplash.\",\n)\n\ndemo.launch()\n\n
\n\n

\n\n

Input-only demos

\n\n

Similarly, to create a demo that only contains inputs, set the value of outputs parameter in Interface() to be None. Here's an example demo that saves any uploaded image to disk:

\n\n
import random\nimport string\nimport gradio as gr \n\ndef save_image_random_name(image):\n    random_string = ''.join(random.choices(string.ascii_letters, k=20)) + '.png'\n    image.save(random_string)\n    print(f\"Saved image to {random_string}!\")\n\ndemo = gr.Interface(\n    fn=save_image_random_name, \n    inputs=gr.Image(type=\"pil\"), \n    outputs=None,\n)\ndemo.launch()\n
\n\n

\n\n

Unified demos

\n\n

A unified demo has a single component as both the input and the output. It can be created simply by setting the inputs and outputs parameters to the same component. Here's an example demo of a text generation model:

\n\n
import gradio as gr\nfrom transformers import pipeline\n\ngenerator = pipeline('text-generation', model = 'gpt2')\n\ndef generate_text(text_prompt):\n  response = generator(text_prompt, max_length = 30, num_return_sequences=5)\n  return response[0]['generated_text']\n\ntextbox = gr.Textbox()\n\ndemo = gr.Interface(generate_text, textbox, textbox)\n\ndemo.launch()\n\n
\n\n

\n", "tags": [], "spaces": [], "url": "/guides/four-kinds-of-interfaces/", "contributor": null}]}, {"category": "Building With Blocks", "guides": [{"name": "blocks-and-event-listeners", "category": "building-with-blocks", "pretty_category": "Building With Blocks", "guide_index": 1, "absolute_index": 8, "pretty_name": "Blocks And Event Listeners", "content": "# Blocks and Event Listeners\n\nWe took a quick look at Blocks in the [Quickstart](https://gradio.app/guides/quickstart/#blocks-more-flexibility-and-control). Let's dive deeper. This guide will cover the how Blocks are structured, event listeners and their types, running events continuously, updating configurations, and using dictionaries vs lists. \n\n## Blocks Structure\n\nTake a look at the demo below.\n\n```python\nimport gradio as gr\n\ndef greet(name):\n return \"Hello \" + name + \"!\"\n\nwith gr.Blocks() as demo:\n name = gr.Textbox(label=\"Name\")\n output = gr.Textbox(label=\"Output Box\")\n greet_btn = gr.Button(\"Greet\")\n greet_btn.click(fn=greet, inputs=name, outputs=output, api_name=\"greet\")\n \n\ndemo.launch()\n```\n\n\n- First, note the `with gr.Blocks() as demo:` clause. The Blocks app code will be contained within this clause.\n- Next come the Components. These are the same Components used in `Interface`. However, instead of being passed to some constructor, Components are automatically added to the Blocks as they are created within the `with` clause.\n- Finally, the `click()` event listener. Event listeners define the data flow within the app. In the example above, the listener ties the two Textboxes together. The Textbox `name` acts as the input and Textbox `output` acts as the output to the `greet` method. This dataflow is triggered when the Button `greet_btn` is clicked. Like an Interface, an event listener can take multiple inputs or outputs.\n\n## Event Listeners and Interactivity\n\nIn the example above, you'll notice that you are able to edit Textbox `name`, but not Textbox `output`. This is because any Component that acts as an input to an event listener is made interactive. However, since Textbox `output` acts only as an output, Gradio determines that it should not be made interactive. You can override the default behavior and directly configure the interactivity of a Component with the boolean `interactive` keyword argument. \n\n```python\noutput = gr.Textbox(label=\"Output\", interactive=True)\n```\n\n_Note_: What happens if a Gradio component is neither an input nor an output? If a component is constructed with a default value, then it is presumed to be displaying content and is rendered non-interactive. Otherwise, it is rendered interactive. Again, this behavior can be overridden by specifying a value for the `interactive` argument.\n\n## Types of Event Listeners\n\nTake a look at the demo below:\n\n```python\nimport gradio as gr\n\ndef welcome(name):\n return f\"Welcome to Gradio, {name}!\"\n\nwith gr.Blocks() as demo:\n gr.Markdown(\n \"\"\"\n # Hello World!\n Start typing below to see the output.\n \"\"\")\n inp = gr.Textbox(placeholder=\"What is your name?\")\n out = gr.Textbox()\n inp.change(welcome, inp, out)\n\ndemo.launch()\n```\n\n\nInstead of being triggered by a click, the `welcome` function is triggered by typing in the Textbox `inp`. This is due to the `change()` event listener. Different Components support different event listeners. For example, the `Video` Component supports a `play()` event listener, triggered when a user presses play. 
See the [Docs](http://gradio.app/docs#components) for the event listeners for each Component.\n\n## Multiple Data Flows\n\nA Blocks app is not limited to a single data flow the way Interfaces are. Take a look at the demo below:\n\n```python\nimport gradio as gr\n\ndef increase(num):\n return num + 1\n\nwith gr.Blocks() as demo:\n a = gr.Number(label=\"a\")\n b = gr.Number(label=\"b\")\n btoa = gr.Button(\"a > b\")\n atob = gr.Button(\"b > a\")\n atob.click(increase, a, b)\n btoa.click(increase, b, a)\n\ndemo.launch()\n```\n\n\nNote that `num1` can act as input to `num2`, and also vice-versa! As your apps get more complex, you will have many data flows connecting various Components. \n\nHere's an example of a \"multi-step\" demo, where the output of one model (a speech-to-text model) gets fed into the next model (a sentiment classifier).\n\n```python\nfrom transformers import pipeline\n\nimport gradio as gr\n\nasr = pipeline(\"automatic-speech-recognition\", \"facebook/wav2vec2-base-960h\")\nclassifier = pipeline(\"text-classification\")\n\n\ndef speech_to_text(speech):\n text = asr(speech)[\"text\"]\n return text\n\n\ndef text_to_sentiment(text):\n return classifier(text)[0][\"label\"]\n\n\ndemo = gr.Blocks()\n\nwith demo:\n audio_file = gr.Audio(type=\"filepath\")\n text = gr.Textbox()\n label = gr.Label()\n\n b1 = gr.Button(\"Recognize Speech\")\n b2 = gr.Button(\"Classify Sentiment\")\n\n b1.click(speech_to_text, inputs=audio_file, outputs=text)\n b2.click(text_to_sentiment, inputs=text, outputs=label)\n\ndemo.launch()\n\n```\n\n\n## Function Input List vs Dict\n\nThe event listeners you've seen so far have a single input component. If you'd like to have multiple input components pass data to the function, you have two options on how the function can accept input component values:\n\n1. as a list of arguments, or\n2. as a single dictionary of values, keyed by the component\n\nLet's see an example of each:\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n a = gr.Number(label=\"a\")\n b = gr.Number(label=\"b\")\n with gr.Row():\n add_btn = gr.Button(\"Add\")\n sub_btn = gr.Button(\"Subtract\")\n c = gr.Number(label=\"sum\")\n\n def add(num1, num2):\n return num1 + num2\n add_btn.click(add, inputs=[a, b], outputs=c)\n\n def sub(data):\n return data[a] - data[b]\n sub_btn.click(sub, inputs={a, b}, outputs=c)\n\n\ndemo.launch()\n```\n\nBoth `add()` and `sub()` take `a` and `b` as inputs. However, the syntax is different between these listeners. \n\n1. To the `add_btn` listener, we pass the inputs as a list. The function `add()` takes each of these inputs as arguments. The value of `a` maps to the argument `num1`, and the value of `b` maps to the argument `num2`.\n2. To the `sub_btn` listener, we pass the inputs as a set (note the curly brackets!). The function `sub()` takes a single dictionary argument `data`, where the keys are the input components, and the values are the values of those components.\n\nIt is a matter of preference which syntax you prefer! For functions with many input components, option 2 may be easier to manage.\n\n\n\n## Function Return List vs Dict\n\nSimilarly, you may return values for multiple output components either as:\n\n1. a list of values, or\n2. 
a dictionary keyed by the component\n\nLet's first see an example of (1), where we set the values of two output components by returning two values:\n\n```python\nwith gr.Blocks() as demo:\n food_box = gr.Number(value=10, label=\"Food Count\")\n status_box = gr.Textbox()\n def eat(food):\n if food > 0:\n return food - 1, \"full\"\n else:\n return 0, \"hungry\"\n gr.Button(\"EAT\").click(\n fn=eat, \n inputs=food_box,\n outputs=[food_box, status_box]\n )\n```\n\nAbove, each return statement returns two values corresponding to `food_box` and `status_box`, respectively.\n\nInstead of returning a list of values corresponding to each output component in order, you can also return a dictionary, with the key corresponding to the output component and the value as the new value. This also allows you to skip updating some output components. \n\n```python\nwith gr.Blocks() as demo:\n food_box = gr.Number(value=10, label=\"Food Count\")\n status_box = gr.Textbox()\n def eat(food):\n if food > 0:\n return {food_box: food - 1, status_box: \"full\"}\n else:\n return {status_box: \"hungry\"}\n gr.Button(\"EAT\").click(\n fn=eat, \n inputs=food_box,\n outputs=[food_box, status_box]\n )\n```\n\nNotice how when there is no food, we only update the `status_box` element. We skipped updating the `food_box` component.\n\nDictionary returns are helpful when an event listener affects many components on return, or conditionally affects outputs and not others.\n\nKeep in mind that with dictionary returns, we still need to specify the possible outputs in the event listener.\n\n## Updating Component Configurations\n\nThe return value of an event listener function is usually the updated value of the corresponding output Component. Sometimes we want to update the configuration of the Component as well, such as the visibility. In this case, we return a `gr.update()` object instead of just the update Component value.\n\n```python\nimport gradio as gr\n\ndef change_textbox(choice):\n if choice == \"short\":\n return gr.update(lines=2, visible=True, value=\"Short story: \")\n elif choice == \"long\":\n return gr.update(lines=8, visible=True, value=\"Long story...\")\n else:\n return gr.update(visible=False)\n\nwith gr.Blocks() as demo:\n radio = gr.Radio(\n [\"short\", \"long\", \"none\"], label=\"Essay Length to Write?\"\n )\n text = gr.Textbox(lines=2, interactive=True)\n radio.change(fn=change_textbox, inputs=radio, outputs=text)\n\ndemo.launch()\n```\n\n\nSee how we can configure the Textbox itself through the `gr.update()` method. The `value=` argument can still be used to update the value along with Component configuration.\n\n## Running Events Consecutively\n\nYou can also run events consecutively by using the `then` method of an event listener. This will run an event after the previous event has finished running. This is useful for running events that update components in multiple steps. 
\n\nFor example, in the chatbot example below, we first update the chatbot with the user message immediately, and then update the chatbot with the computer response after a simulated delay.\n\n```python\nimport gradio as gr\nimport random\nimport time\n\nwith gr.Blocks() as demo:\n chatbot = gr.Chatbot()\n msg = gr.Textbox()\n clear = gr.Button(\"Clear\")\n\n def user(user_message, history):\n return \"\", history + [[user_message, None]]\n\n def bot(history):\n bot_message = random.choice([\"How are you?\", \"I love you\", \"I'm very hungry\"])\n time.sleep(2)\n history[-1][1] = bot_message\n return history\n\n msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(\n bot, chatbot, chatbot\n )\n clear.click(lambda: None, None, chatbot, queue=False)\n \ndemo.queue()\ndemo.launch()\n\n```\n\n\nThe `.then()` method of an event listener executes the subsequent event regardless of whether the previous event raised any errors. If you'd like to only run subsequent events if the previous event executed successfully, use the `.success()` method, which takes the same arguments as `.then()`.\n\n## Running Events Continuously\n\nYou can run events on a fixed schedule using the `every` parameter of the event listener. This will run the event\n`every` number of seconds while the client connection is open. If the connection is closed, the event will stop running after the following iteration.\nNote that this does not take into account the runtime of the event itself. So a function\nwith a 1 second runtime running with `every=5`, would actually run every 6 seconds.\n\nHere is an example of a sine curve that updates every second!\n\n```python\nimport math\nimport gradio as gr\nimport plotly.express as px\nimport numpy as np\n\n\nplot_end = 2 * math.pi\n\n\ndef get_plot(period=1):\n global plot_end\n x = np.arange(plot_end - 2 * math.pi, plot_end, 0.02)\n y = np.sin(2*math.pi*period * x)\n fig = px.line(x=x, y=y)\n plot_end += 2 * math.pi\n if plot_end > 1000:\n plot_end = 2 * math.pi\n return fig\n\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n gr.Markdown(\"Change the value of the slider to automatically update the plot\")\n period = gr.Slider(label=\"Period of plot\", value=1, minimum=0, maximum=10, step=1)\n plot = gr.Plot(label=\"Plot (updates every half second)\")\n\n dep = demo.load(get_plot, None, plot, every=1)\n period.change(get_plot, period, plot, every=1, cancels=[dep])\n\n\nif __name__ == \"__main__\":\n demo.queue().launch()\n\n```\n\n\n## Gathering Event Data\n\nYou can gather specific data about an event by adding the associated event data class as a type hint to an argument in the event listener function. \n\nFor example, event data for `.select()` can be type hinted by a `gradio.SelectData` argument. This event is triggered when a user selects some part of the triggering component, and the event data includes information about what the user specifically selected. If a user selected a specific word in a `Textbox`, a specific image in a `Gallery`, or a specific cell in a `DataFrame`, the event data argument would contain information about the specific selection.\n\nIn the 2 player tic-tac-toe demo below, a user can select a cell in the `DataFrame` to make a move. The event data argument contains information about the specific cell that was selected. We can first check to see if the cell is empty, and then update the cell with the user's move. 
\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n turn = gr.Textbox(\"X\", interactive=False, label=\"Turn\")\n board = gr.Dataframe(value=[[\"\", \"\", \"\"]] * 3, interactive=False, type=\"array\")\n\n def place(board, turn, evt: gr.SelectData):\n if evt.value:\n return board, turn\n board[evt.index[0]][evt.index[1]] = turn\n turn = \"O\" if turn == \"X\" else \"X\"\n return board, turn\n\n board.select(place, [board, turn], [board, turn])\n\ndemo.launch()\n```\n", "html": "

Blocks and Event Listeners

\n\n

We took a quick look at Blocks in the Quickstart. Let's dive deeper. This guide will cover how Blocks are structured, event listeners and their types, running events continuously, updating configurations, and using dictionaries vs. lists.

\n\n

Blocks Structure

\n\n

Take a look at the demo below.

\n\n
import gradio as gr\n\ndef greet(name):\n    return \"Hello \" + name + \"!\"\n\nwith gr.Blocks() as demo:\n    name = gr.Textbox(label=\"Name\")\n    output = gr.Textbox(label=\"Output Box\")\n    greet_btn = gr.Button(\"Greet\")\n    greet_btn.click(fn=greet, inputs=name, outputs=output, api_name=\"greet\")\n\n\ndemo.launch()\n
\n\n

\n\n
    \n
  • First, note the with gr.Blocks() as demo: clause. The Blocks app code will be contained within this clause.
  • \n
  • Next come the Components. These are the same Components used in Interface. However, instead of being passed to some constructor, Components are automatically added to the Blocks as they are created within the with clause.
  • \n
  • Finally, the click() event listener. Event listeners define the data flow within the app. In the example above, the listener ties the two Textboxes together. The Textbox name acts as the input and Textbox output acts as the output to the greet method. This dataflow is triggered when the Button greet_btn is clicked. Like an Interface, an event listener can take multiple inputs or outputs.
  • \n
\n\n

Event Listeners and Interactivity

\n\n

In the example above, you'll notice that you are able to edit Textbox name, but not Textbox output. This is because any Component that acts as an input to an event listener is made interactive. However, since Textbox output acts only as an output, Gradio determines that it should not be made interactive. You can override the default behavior and directly configure the interactivity of a Component with the boolean interactive keyword argument.

\n\n
output = gr.Textbox(label=\"Output\", interactive=True)\n
\n\n

Note: What happens if a Gradio component is neither an input nor an output? If a component is constructed with a default value, then it is presumed to be displaying content and is rendered non-interactive. Otherwise, it is rendered interactive. Again, this behavior can be overridden by specifying a value for the interactive argument.

\n\n
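To make the note above concrete, here is a minimal sketch (the component labels are illustrative, not from the guide) of the default interactivity rules and the interactive override:

```python
import gradio as gr

with gr.Blocks() as demo:
    # Constructed with a default value and not used as an event input:
    # presumed to display content, so rendered non-interactive by default.
    gr.Textbox(value="Read-only by default", label="Static display")
    # No default value and not wired to any event: rendered interactive.
    gr.Textbox(label="Editable by default")
    # The interactive keyword argument overrides either default.
    gr.Textbox(value="Still editable", label="Override", interactive=True)

demo.launch()
```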

Types of Event Listeners

\n\n

Take a look at the demo below:

\n\n
import gradio as gr\n\ndef welcome(name):\n    return f\"Welcome to Gradio, {name}!\"\n\nwith gr.Blocks() as demo:\n    gr.Markdown(\n    \"\"\"\n    # Hello World!\n    Start typing below to see the output.\n    \"\"\")\n    inp = gr.Textbox(placeholder=\"What is your name?\")\n    out = gr.Textbox()\n    inp.change(welcome, inp, out)\n\ndemo.launch()\n
\n\n

\n\n

Instead of being triggered by a click, the welcome function is triggered by typing in the Textbox inp. This is due to the change() event listener. Different Components support different event listeners. For example, the Video Component supports a play() event listener, triggered when a user presses play. See the Docs for the event listeners for each Component.

\n\n
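As a hedged sketch of a component-specific listener (the status text below is illustrative), a Video component's play() event can update another component when playback starts:

```python
import gradio as gr

with gr.Blocks() as demo:
    video = gr.Video()
    status = gr.Textbox(label="Status")
    # play() fires when the user presses play on the video.
    video.play(lambda: "Playing...", inputs=None, outputs=status)

demo.launch()
```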

Multiple Data Flows

\n\n

A Blocks app is not limited to a single data flow the way Interfaces are. Take a look at the demo below:

\n\n
import gradio as gr\n\ndef increase(num):\n    return num + 1\n\nwith gr.Blocks() as demo:\n    a = gr.Number(label=\"a\")\n    b = gr.Number(label=\"b\")\n    btoa = gr.Button(\"a > b\")\n    atob = gr.Button(\"b > a\")\n    atob.click(increase, a, b)\n    btoa.click(increase, b, a)\n\ndemo.launch()\n
\n\n

\n\n

Note that a can act as input to b, and vice-versa! As your apps get more complex, you will have many data flows connecting various Components.

\n\n

Here's an example of a \"multi-step\" demo, where the output of one model (a speech-to-text model) gets fed into the next model (a sentiment classifier).

\n\n
from transformers import pipeline\n\nimport gradio as gr\n\nasr = pipeline(\"automatic-speech-recognition\", \"facebook/wav2vec2-base-960h\")\nclassifier = pipeline(\"text-classification\")\n\n\ndef speech_to_text(speech):\n    text = asr(speech)[\"text\"]\n    return text\n\n\ndef text_to_sentiment(text):\n    return classifier(text)[0][\"label\"]\n\n\ndemo = gr.Blocks()\n\nwith demo:\n    audio_file = gr.Audio(type=\"filepath\")\n    text = gr.Textbox()\n    label = gr.Label()\n\n    b1 = gr.Button(\"Recognize Speech\")\n    b2 = gr.Button(\"Classify Sentiment\")\n\n    b1.click(speech_to_text, inputs=audio_file, outputs=text)\n    b2.click(text_to_sentiment, inputs=text, outputs=label)\n\ndemo.launch()\n\n
\n\n

\n\n

Function Input List vs Dict

\n\n

The event listeners you've seen so far have a single input component. If you'd like to have multiple input components pass data to the function, you have two options on how the function can accept input component values:

\n\n
    \n
  1. as a list of arguments, or
  2. \n
  3. as a single dictionary of values, keyed by the component
  4. \n
\n\n

Let's see an example of each:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    a = gr.Number(label=\"a\")\n    b = gr.Number(label=\"b\")\n    with gr.Row():\n        add_btn = gr.Button(\"Add\")\n        sub_btn = gr.Button(\"Subtract\")\n    c = gr.Number(label=\"sum\")\n\n    def add(num1, num2):\n        return num1 + num2\n    add_btn.click(add, inputs=[a, b], outputs=c)\n\n    def sub(data):\n        return data[a] - data[b]\n    sub_btn.click(sub, inputs={a, b}, outputs=c)\n\n\ndemo.launch()\n
\n\n

Both add() and sub() take a and b as inputs. However, the syntax is different between these listeners.

\n\n
    \n
  1. To the add_btn listener, we pass the inputs as a list. The function add() takes each of these inputs as arguments. The value of a maps to the argument num1, and the value of b maps to the argument num2.
  2. \n
  3. To the sub_btn listener, we pass the inputs as a set (note the curly brackets!). The function sub() takes a single dictionary argument data, where the keys are the input components, and the values are the values of those components.
  4. \n
\n\n

Which syntax you use is a matter of preference! For functions with many input components, option 2 may be easier to manage.

\n\n

\n\n

Function Return List vs Dict

\n\n

Similarly, you may return values for multiple output components either as:

\n\n
    \n
  1. a list of values, or
  2. \n
  3. a dictionary keyed by the component
  4. \n
\n\n

Let's first see an example of (1), where we set the values of two output components by returning two values:

\n\n
with gr.Blocks() as demo:\n    food_box = gr.Number(value=10, label=\"Food Count\")\n    status_box = gr.Textbox()\n    def eat(food):\n        if food > 0:\n            return food - 1, \"full\"\n        else:\n            return 0, \"hungry\"\n    gr.Button(\"EAT\").click(\n        fn=eat, \n        inputs=food_box,\n        outputs=[food_box, status_box]\n    )\n
\n\n

Above, each return statement returns two values corresponding to food_box and status_box, respectively.

\n\n

Instead of returning a list of values corresponding to each output component in order, you can also return a dictionary, with the key corresponding to the output component and the value as the new value. This also allows you to skip updating some output components.

\n\n
with gr.Blocks() as demo:\n    food_box = gr.Number(value=10, label=\"Food Count\")\n    status_box = gr.Textbox()\n    def eat(food):\n        if food > 0:\n            return {food_box: food - 1, status_box: \"full\"}\n        else:\n            return {status_box: \"hungry\"}\n    gr.Button(\"EAT\").click(\n        fn=eat, \n        inputs=food_box,\n        outputs=[food_box, status_box]\n    )\n
\n\n

Notice how when there is no food, we only update the status_box element. We skipped updating the food_box component.

\n\n

Dictionary returns are helpful when an event listener affects many components on return, or conditionally affects some outputs and not others.

\n\n

Keep in mind that with dictionary returns, we still need to specify the possible outputs in the event listener.

\n\n

Updating Component Configurations

\n\n

The return value of an event listener function is usually the updated value of the corresponding output Component. Sometimes we want to update the configuration of the Component as well, such as the visibility. In this case, we return a gr.update() object instead of just the updated Component value.

\n\n
import gradio as gr\n\ndef change_textbox(choice):\n    if choice == \"short\":\n        return gr.update(lines=2, visible=True, value=\"Short story: \")\n    elif choice == \"long\":\n        return gr.update(lines=8, visible=True, value=\"Long story...\")\n    else:\n        return gr.update(visible=False)\n\nwith gr.Blocks() as demo:\n    radio = gr.Radio(\n        [\"short\", \"long\", \"none\"], label=\"Essay Length to Write?\"\n    )\n    text = gr.Textbox(lines=2, interactive=True)\n    radio.change(fn=change_textbox, inputs=radio, outputs=text)\n\ndemo.launch()\n
\n\n

\n\n

See how we can configure the Textbox itself through the gr.update() method. The value= argument can still be used to update the value along with Component configuration.

\n\n

Running Events Consecutively

\n\n

You can also run events consecutively by using the then method of an event listener. This will run an event after the previous event has finished running. This is useful for running events that update components in multiple steps.

\n\n

For example, in the chatbot example below, we first update the chatbot with the user message immediately, and then update the chatbot with the computer response after a simulated delay.

\n\n
import gradio as gr\nimport random\nimport time\n\nwith gr.Blocks() as demo:\n    chatbot = gr.Chatbot()\n    msg = gr.Textbox()\n    clear = gr.Button(\"Clear\")\n\n    def user(user_message, history):\n        return \"\", history + [[user_message, None]]\n\n    def bot(history):\n        bot_message = random.choice([\"How are you?\", \"I love you\", \"I'm very hungry\"])\n        time.sleep(2)\n        history[-1][1] = bot_message\n        return history\n\n    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(\n        bot, chatbot, chatbot\n    )\n    clear.click(lambda: None, None, chatbot, queue=False)\n\ndemo.queue()\ndemo.launch()\n\n
\n\n

\n\n

The .then() method of an event listener executes the subsequent event regardless of whether the previous event raised any errors. If you'd like to only run subsequent events if the previous event executed successfully, use the .success() method, which takes the same arguments as .then().

\n\n
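Here is a small sketch of that pattern (the validation logic is illustrative): process only runs if validate completes without raising an error.

```python
import gradio as gr

def validate(text):
    if not text:
        raise gr.Error("Please enter some text")  # stops the chain here
    return text

def process(text):
    return text.upper()

with gr.Blocks() as demo:
    inp = gr.Textbox(label="Input")
    checked = gr.Textbox(label="Validated")
    result = gr.Textbox(label="Result")
    btn = gr.Button("Run")
    # .success() only fires if the click handler finished without errors.
    btn.click(validate, inp, checked).success(process, checked, result)

demo.launch()
```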

Running Events Continuously

\n\n

You can run events on a fixed schedule using the every parameter of the event listener. This will run the event\nevery number of seconds while the client connection is open. If the connection is closed, the event will stop running after the following iteration.\nNote that this does not take into account the runtime of the event itself. So a function\nwith a 1 second runtime running with every=5 would actually run every 6 seconds.

\n\n

Here is an example of a sine curve that updates every second!

\n\n
import math\nimport gradio as gr\nimport plotly.express as px\nimport numpy as np\n\n\nplot_end = 2 * math.pi\n\n\ndef get_plot(period=1):\n    global plot_end\n    x = np.arange(plot_end - 2 * math.pi, plot_end, 0.02)\n    y = np.sin(2*math.pi*period * x)\n    fig = px.line(x=x, y=y)\n    plot_end += 2 * math.pi\n    if plot_end > 1000:\n        plot_end = 2 * math.pi\n    return fig\n\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        with gr.Column():\n            gr.Markdown(\"Change the value of the slider to automatically update the plot\")\n            period = gr.Slider(label=\"Period of plot\", value=1, minimum=0, maximum=10, step=1)\n            plot = gr.Plot(label=\"Plot (updates every half second)\")\n\n    dep = demo.load(get_plot, None, plot, every=1)\n    period.change(get_plot, period, plot, every=1, cancels=[dep])\n\n\nif __name__ == \"__main__\":\n    demo.queue().launch()\n\n
\n\n

\n\n

Gathering Event Data

\n\n

You can gather specific data about an event by adding the associated event data class as a type hint to an argument in the event listener function.

\n\n

For example, event data for .select() can be type hinted by a gradio.SelectData argument. This event is triggered when a user selects some part of the triggering component, and the event data includes information about what the user specifically selected. If a user selected a specific word in a Textbox, a specific image in a Gallery, or a specific cell in a DataFrame, the event data argument would contain information about the specific selection.

\n\n

In the 2 player tic-tac-toe demo below, a user can select a cell in the DataFrame to make a move. The event data argument contains information about the specific cell that was selected. We can first check to see if the cell is empty, and then update the cell with the user's move.

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    turn = gr.Textbox(\"X\", interactive=False, label=\"Turn\")\n    board = gr.Dataframe(value=[[\"\", \"\", \"\"]] * 3, interactive=False, type=\"array\")\n\n    def place(board, turn, evt: gr.SelectData):\n        if evt.value:\n            return board, turn\n        board[evt.index[0]][evt.index[1]] = turn\n        turn = \"O\" if turn == \"X\" else \"X\"\n        return board, turn\n\n    board.select(place, [board, turn], [board, turn])\n\ndemo.launch()\n
\n\n

\n", "tags": [], "spaces": [], "url": "/guides/blocks-and-event-listeners/", "contributor": null}, {"name": "controlling-layout", "category": "building-with-blocks", "pretty_category": "Building With Blocks", "guide_index": 2, "absolute_index": 9, "pretty_name": "Controlling Layout", "content": "# Controlling Layout\n\nBy default, Components in Blocks are arranged vertically. Let's take a look at how we can rearrange Components. Under the hood, this layout structure uses the [flexbox model of web development](https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Flexible_Box_Layout/Basic_Concepts_of_Flexbox).\n\n## Rows\n\nElements within a `with gr.Row` clause will all be displayed horizontally. For example, to display two Buttons side by side:\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row():\n btn1 = gr.Button(\"Button 1\")\n btn2 = gr.Button(\"Button 2\")\n```\n\nTo make every element in a Row have the same height, use the `equal_height` argument of the `style` method.\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row().style(equal_height=True):\n textbox = gr.Textbox()\n btn2 = gr.Button(\"Button 2\")\n```\n\nThe widths of elements in a Row can be controlled via a combination of `scale` and `min_width` arguments that are present in every Component.\n\n- `scale` is an integer that defines how an element will take up space in a Row. If scale is set to `0`, and element will not expand to take up space. If scale is set to `1` or greater, the element well expand. Multiple elements in a row will expand proportional to their scale. Below, `btn1` will expand twice as much as `btn2`, while `btn0` will not expand at all:\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row():\n btn0 = gr.Button(\"Button 0\", scale=0)\n btn1 = gr.Button(\"Button 1\", scale=1)\n btn2 = gr.Button(\"Button 2\", scale=2)\n```\n\n- `min_width` will set the minimum width the element will take. The Row will wrap if there isn't sufficient space to satisfy all `min_width` values.\n\nLearn more about Rows in the [docs](https://gradio.app/docs/#row).\n\n## Columns and Nesting\n\nComponents within a Column will be placed vertically atop each other. Since the vertical layout is the default layout for Blocks apps anyway, to be useful, Columns are usually nested within Rows. For example:\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n with gr.Row():\n text1 = gr.Textbox(label=\"t1\")\n slider2 = gr.Textbox(label=\"s2\")\n drop3 = gr.Dropdown([\"a\", \"b\", \"c\"], label=\"d3\")\n with gr.Row():\n with gr.Column(scale=1, min_width=600):\n text1 = gr.Textbox(label=\"prompt 1\")\n text2 = gr.Textbox(label=\"prompt 2\")\n inbtw = gr.Button(\"Between\")\n text4 = gr.Textbox(label=\"prompt 1\")\n text5 = gr.Textbox(label=\"prompt 2\")\n with gr.Column(scale=2, min_width=600):\n img1 = gr.Image(\"images/cheetah.jpg\")\n btn = gr.Button(\"Go\").style(full_width=True)\n\ndemo.launch()\n```\n\n\nSee how the first column has two Textboxes arranged vertically. The second column has an Image and Button arranged vertically. Notice how the relative widths of the two columns is set by the `scale` parameter. The column with twice the `scale` value takes up twice the width.\n\nLearn more about Columns in the [docs](https://gradio.app/docs/#column).\n\n## Tabs and Accordions\n\nYou can also create Tabs using the `with gr.Tab('tab_name'):` clause. Any component created inside of a `with gr.Tab('tab_name'):` context appears in that tab. 
Consecutive Tab clauses are grouped together so that a single tab can be selected at one time, and only the components within that Tab's context are shown.\n\nFor example:\n\n```python\nimport numpy as np\nimport gradio as gr\n\n\ndef flip_text(x):\n return x[::-1]\n\n\ndef flip_image(x):\n return np.fliplr(x)\n\n\nwith gr.Blocks() as demo:\n gr.Markdown(\"Flip text or image files using this demo.\")\n with gr.Tab(\"Flip Text\"):\n text_input = gr.Textbox()\n text_output = gr.Textbox()\n text_button = gr.Button(\"Flip\")\n with gr.Tab(\"Flip Image\"):\n with gr.Row():\n image_input = gr.Image()\n image_output = gr.Image()\n image_button = gr.Button(\"Flip\")\n\n with gr.Accordion(\"Open for More!\"):\n gr.Markdown(\"Look at me...\")\n\n text_button.click(flip_text, inputs=text_input, outputs=text_output)\n image_button.click(flip_image, inputs=image_input, outputs=image_output)\n\ndemo.launch()\n\n```\n\n\nAlso note the `gr.Accordion('label')` in this example. The Accordion is a layout that can be toggled open or closed. Like `Tabs`, it is a layout element that can selectively hide or show content. Any components that are defined inside of a `with gr.Accordion('label'):` will be hidden or shown when the accordion's toggle icon is clicked.\n\nLearn more about [Tabs](https://gradio.app/docs/#tab) and [Accordions](https://gradio.app/docs/#accordion) in the docs.\n\n## Visibility\n\nBoth Components and Layout elements have a `visible` argument that can set initially and also updated using `gr.update()`. Setting `gr.update(visible=...)` on a Column can be used to show or hide a set of Components.\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n error_box = gr.Textbox(label=\"Error\", visible=False)\n\n name_box = gr.Textbox(label=\"Name\")\n age_box = gr.Number(label=\"Age\", minimum=0, maximum=100)\n symptoms_box = gr.CheckboxGroup([\"Cough\", \"Fever\", \"Runny Nose\"])\n submit_btn = gr.Button(\"Submit\")\n\n with gr.Column(visible=False) as output_col:\n diagnosis_box = gr.Textbox(label=\"Diagnosis\")\n patient_summary_box = gr.Textbox(label=\"Patient Summary\")\n\n def submit(name, age, symptoms):\n if len(name) == 0:\n return {error_box: gr.update(value=\"Enter name\", visible=True)}\n return {\n output_col: gr.update(visible=True),\n diagnosis_box: \"covid\" if \"Cough\" in symptoms else \"flu\",\n patient_summary_box: f\"{name}, {age} y/o\",\n }\n\n submit_btn.click(\n submit,\n [name_box, age_box, symptoms_box],\n [error_box, diagnosis_box, patient_summary_box, output_col],\n )\n\ndemo.launch()\n\n```\n\n\n## Variable Number of Outputs\n\nBy adjusting the visibility of components in a dynamic way, it is possible to create\ndemos with Gradio that support a *variable numbers of outputs*. Here's a very simple example\nwhere the number of output textboxes is controlled by an input slider:\n\n```python\nimport gradio as gr\n\nmax_textboxes = 10\n\ndef variable_outputs(k):\n k = int(k)\n return [gr.Textbox.update(visible=True)]*k + [gr.Textbox.update(visible=False)]*(max_textboxes-k)\n\nwith gr.Blocks() as demo:\n s = gr.Slider(1, max_textboxes, value=max_textboxes, step=1, label=\"How many textboxes to show:\")\n textboxes = []\n for i in range(max_textboxes):\n t = gr.Textbox(f\"Textbox {i}\")\n textboxes.append(t)\n\n s.change(variable_outputs, s, textboxes)\n\nif __name__ == \"__main__\":\n demo.launch()\n\n```\n\n\n## Defining and Rendering Components Separately\n\nIn some cases, you might want to define components before you actually render them in your UI. 
For instance, you might want to show an examples section using `gr.Examples` above the corresponding `gr.Textbox` input. Since `gr.Examples` requires as a parameter the input component object, you will need to first define the input component, but then render it later, after you have defined the `gr.Examples` object.\n\nThe solution to this is to define the `gr.Textbox` outside of the `gr.Blocks()` scope and use the component's `.render()` method wherever you'd like it placed in the UI.\n\nHere's a full code example:\n\n```python\ninput_textbox = gr.Textbox()\n\nwith gr.Blocks() as demo:\n gr.Examples([\"hello\", \"bonjour\", \"merhaba\"], input_textbox)\n input_textbox.render()\n```\n", "html": "

Controlling Layout

\n\n

By default, Components in Blocks are arranged vertically. Let's take a look at how we can rearrange Components. Under the hood, this layout structure uses the flexbox model of web development.

\n\n

Rows

\n\n

Elements within a with gr.Row clause will all be displayed horizontally. For example, to display two Buttons side by side:

\n\n
with gr.Blocks() as demo:\n    with gr.Row():\n        btn1 = gr.Button(\"Button 1\")\n        btn2 = gr.Button(\"Button 2\")\n
\n\n

To make every element in a Row have the same height, use the equal_height argument of the style method.

\n\n
with gr.Blocks() as demo:\n    with gr.Row().style(equal_height=True):\n        textbox = gr.Textbox()\n        btn2 = gr.Button(\"Button 2\")\n
\n\n

The widths of elements in a Row can be controlled via a combination of scale and min_width arguments that are present in every Component.

\n\n
    \n
  • scale is an integer that defines how an element will take up space in a Row. If scale is set to 0, the element will not expand to take up space. If scale is set to 1 or greater, the element will expand. Multiple elements in a row will expand proportionally to their scale. Below, btn1 will expand twice as much as btn2, while btn0 will not expand at all:
  • \n
\n\n
with gr.Blocks() as demo:\n    with gr.Row():\n        btn0 = gr.Button(\"Button 0\", scale=0)\n        btn1 = gr.Button(\"Button 1\", scale=1)\n        btn2 = gr.Button(\"Button 2\", scale=2)\n
\n\n
    \n
  • min_width will set the minimum width the element will take. The Row will wrap if there isn't sufficient space to satisfy all min_width values (see the sketch after this list).
  • \n
\n\n
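As a short sketch (the pixel values are arbitrary), two buttons that each request at least 300px will wrap onto separate lines once the Row becomes narrower than 600px:

```python
import gradio as gr

with gr.Blocks() as demo:
    with gr.Row():
        # Each button takes at least 300px; the Row wraps when it cannot fit both.
        btn1 = gr.Button("Button 1", scale=1, min_width=300)
        btn2 = gr.Button("Button 2", scale=1, min_width=300)

demo.launch()
```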

Learn more about Rows in the docs.

\n\n

Columns and Nesting

\n\n

Components within a Column will be placed vertically atop each other. Since the vertical layout is the default layout for Blocks apps anyway, to be useful, Columns are usually nested within Rows. For example:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        text1 = gr.Textbox(label=\"t1\")\n        slider2 = gr.Textbox(label=\"s2\")\n        drop3 = gr.Dropdown([\"a\", \"b\", \"c\"], label=\"d3\")\n    with gr.Row():\n        with gr.Column(scale=1, min_width=600):\n            text1 = gr.Textbox(label=\"prompt 1\")\n            text2 = gr.Textbox(label=\"prompt 2\")\n            inbtw = gr.Button(\"Between\")\n            text4 = gr.Textbox(label=\"prompt 1\")\n            text5 = gr.Textbox(label=\"prompt 2\")\n        with gr.Column(scale=2, min_width=600):\n            img1 = gr.Image(\"images/cheetah.jpg\")\n            btn = gr.Button(\"Go\").style(full_width=True)\n\ndemo.launch()\n
\n\n

\n\n

See how the first column has two Textboxes arranged vertically. The second column has an Image and Button arranged vertically. Notice how the relative widths of the two columns are set by the scale parameter. The column with twice the scale value takes up twice the width.

\n\n

Learn more about Columns in the docs.

\n\n

Tabs and Accordions

\n\n

You can also create Tabs using the with gr.Tab('tab_name'): clause. Any component created inside of a with gr.Tab('tab_name'): context appears in that tab. Consecutive Tab clauses are grouped together so that a single tab can be selected at one time, and only the components within that Tab's context are shown.

\n\n

For example:

\n\n
import numpy as np\nimport gradio as gr\n\n\ndef flip_text(x):\n    return x[::-1]\n\n\ndef flip_image(x):\n    return np.fliplr(x)\n\n\nwith gr.Blocks() as demo:\n    gr.Markdown(\"Flip text or image files using this demo.\")\n    with gr.Tab(\"Flip Text\"):\n        text_input = gr.Textbox()\n        text_output = gr.Textbox()\n        text_button = gr.Button(\"Flip\")\n    with gr.Tab(\"Flip Image\"):\n        with gr.Row():\n            image_input = gr.Image()\n            image_output = gr.Image()\n        image_button = gr.Button(\"Flip\")\n\n    with gr.Accordion(\"Open for More!\"):\n        gr.Markdown(\"Look at me...\")\n\n    text_button.click(flip_text, inputs=text_input, outputs=text_output)\n    image_button.click(flip_image, inputs=image_input, outputs=image_output)\n\ndemo.launch()\n\n
\n\n

\n\n

Also note the gr.Accordion('label') in this example. The Accordion is a layout that can be toggled open or closed. Like Tabs, it is a layout element that can selectively hide or show content. Any components that are defined inside of a with gr.Accordion('label'): will be hidden or shown when the accordion's toggle icon is clicked.

\n\n

Learn more about Tabs and Accordions in the docs.

\n\n

Visibility

\n\n

Both Components and Layout elements have a visible argument that can be set initially and also updated using gr.update(). Setting gr.update(visible=...) on a Column can be used to show or hide a set of Components.

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    error_box = gr.Textbox(label=\"Error\", visible=False)\n\n    name_box = gr.Textbox(label=\"Name\")\n    age_box = gr.Number(label=\"Age\", minimum=0, maximum=100)\n    symptoms_box = gr.CheckboxGroup([\"Cough\", \"Fever\", \"Runny Nose\"])\n    submit_btn = gr.Button(\"Submit\")\n\n    with gr.Column(visible=False) as output_col:\n        diagnosis_box = gr.Textbox(label=\"Diagnosis\")\n        patient_summary_box = gr.Textbox(label=\"Patient Summary\")\n\n    def submit(name, age, symptoms):\n        if len(name) == 0:\n            return {error_box: gr.update(value=\"Enter name\", visible=True)}\n        return {\n            output_col: gr.update(visible=True),\n            diagnosis_box: \"covid\" if \"Cough\" in symptoms else \"flu\",\n            patient_summary_box: f\"{name}, {age} y/o\",\n        }\n\n    submit_btn.click(\n        submit,\n        [name_box, age_box, symptoms_box],\n        [error_box, diagnosis_box, patient_summary_box, output_col],\n    )\n\ndemo.launch()\n\n
\n\n

\n\n

Variable Number of Outputs

\n\n

By adjusting the visibility of components in a dynamic way, it is possible to create\ndemos with Gradio that support a variable number of outputs. Here's a very simple example\nwhere the number of output textboxes is controlled by an input slider:

\n\n
import gradio as gr\n\nmax_textboxes = 10\n\ndef variable_outputs(k):\n    k = int(k)\n    return [gr.Textbox.update(visible=True)]*k + [gr.Textbox.update(visible=False)]*(max_textboxes-k)\n\nwith gr.Blocks() as demo:\n    s = gr.Slider(1, max_textboxes, value=max_textboxes, step=1, label=\"How many textboxes to show:\")\n    textboxes = []\n    for i in range(max_textboxes):\n        t = gr.Textbox(f\"Textbox {i}\")\n        textboxes.append(t)\n\n    s.change(variable_outputs, s, textboxes)\n\nif __name__ == \"__main__\":\n   demo.launch()\n\n
\n\n

\n\n

Defining and Rendering Components Separately

\n\n

In some cases, you might want to define components before you actually render them in your UI. For instance, you might want to show an examples section using gr.Examples above the corresponding gr.Textbox input. Since gr.Examples requires as a parameter the input component object, you will need to first define the input component, but then render it later, after you have defined the gr.Examples object.

\n\n

The solution to this is to define the gr.Textbox outside of the gr.Blocks() scope and use the component's .render() method wherever you'd like it placed in the UI.

\n\n

Here's a full code example:

\n\n
input_textbox = gr.Textbox()\n\nwith gr.Blocks() as demo:\n    gr.Examples([\"hello\", \"bonjour\", \"merhaba\"], input_textbox)\n    input_textbox.render()\n
\n", "tags": [], "spaces": [], "url": "/guides/controlling-layout/", "contributor": null}, {"name": "state-in-blocks", "category": "building-with-blocks", "pretty_category": "Building With Blocks", "guide_index": 3, "absolute_index": 10, "pretty_name": "State In Blocks", "content": "# State in Blocks\n\nWe covered [State in Interfaces](https://gradio.app/interface-state), this guide takes a look at state in Blocks, which works mostly the same. \n\n## Global State\n\nGlobal state in Blocks works the same as in Interface. Any variable created outside a function call is a reference shared between all users.\n\n## Session State\n\nGradio supports session **state**, where data persists across multiple submits within a page session, in Blocks apps as well. To reiterate, session data is *not* shared between different users of your model. To store data in a session state, you need to do three things:\n\n1. Create a `gr.State()` object. If there is a default value to this stateful object, pass that into the constructor.\n2. In the event listener, put the `State` object as an input and output.\n3. In the event listener function, add the variable to the input parameters and the return value.\n\nLet's take a look at a game of hangman. \n\n```python\nimport gradio as gr\n\nsecret_word = \"gradio\"\n\nwith gr.Blocks() as demo: \n used_letters_var = gr.State([])\n with gr.Row() as row:\n with gr.Column():\n input_letter = gr.Textbox(label=\"Enter letter\")\n btn = gr.Button(\"Guess Letter\")\n with gr.Column():\n hangman = gr.Textbox(\n label=\"Hangman\",\n value=\"_\"*len(secret_word)\n )\n used_letters_box = gr.Textbox(label=\"Used Letters\")\n\n def guess_letter(letter, used_letters):\n used_letters.append(letter)\n answer = \"\".join([\n (letter if letter in used_letters else \"_\")\n for letter in secret_word\n ])\n return {\n used_letters_var: used_letters,\n used_letters_box: \", \".join(used_letters),\n hangman: answer\n }\n btn.click(\n guess_letter, \n [input_letter, used_letters_var],\n [used_letters_var, used_letters_box, hangman]\n )\ndemo.launch()\n```\n\n\nLet's see how we do each of the 3 steps listed above in this game:\n\n1. We store the used letters in `used_letters_var`. In the constructor of `State`, we set the initial value of this to `[]`, an empty list. \n2. In `btn.click()`, we have a reference to `used_letters_var` in both the inputs and outputs.\n3. In `guess_letter`, we pass the value of this `State` to `used_letters`, and then return an updated value of this `State` in the return statement.\n\nWith more complex apps, you will likely have many State variables storing session state in a single Blocks app.\n\nLearn more about `State` in the [docs](https://gradio.app/docs#state).\n\n\n\n", "html": "

State in Blocks

\n\n

We covered State in Interfaces; this guide takes a look at state in Blocks, which works mostly the same.

\n\n

Global State

\n\n

Global state in Blocks works the same as in Interface. Any variable created outside a function call is a reference shared between all users.

\n\n
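A minimal sketch of this (the counter is illustrative): visit_count is defined outside any function, so every user of the app increments the same value.

```python
import gradio as gr

visit_count = 0  # shared across all users and sessions

def greet(name):
    global visit_count
    visit_count += 1
    return f"Hello {name}! This app has been used {visit_count} times."

with gr.Blocks() as demo:
    name = gr.Textbox(label="Name")
    out = gr.Textbox(label="Greeting")
    gr.Button("Greet").click(greet, name, out)

demo.launch()
```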

Session State

\n\n

Gradio supports session state, where data persists across multiple submits within a page session, in Blocks apps as well. To reiterate, session data is not shared between different users of your model. To store data in a session state, you need to do three things:

\n\n
    \n
  1. Create a gr.State() object. If there is a default value to this stateful object, pass that into the constructor.
  2. \n
  3. In the event listener, put the State object as an input and output.
  4. \n
  5. In the event listener function, add the variable to the input parameters and the return value.
  6. \n
\n\n

Let's take a look at a game of hangman.

\n\n
import gradio as gr\n\nsecret_word = \"gradio\"\n\nwith gr.Blocks() as demo:    \n    used_letters_var = gr.State([])\n    with gr.Row() as row:\n        with gr.Column():\n            input_letter = gr.Textbox(label=\"Enter letter\")\n            btn = gr.Button(\"Guess Letter\")\n        with gr.Column():\n            hangman = gr.Textbox(\n                label=\"Hangman\",\n                value=\"_\"*len(secret_word)\n            )\n            used_letters_box = gr.Textbox(label=\"Used Letters\")\n\n    def guess_letter(letter, used_letters):\n        used_letters.append(letter)\n        answer = \"\".join([\n            (letter if letter in used_letters else \"_\")\n            for letter in secret_word\n        ])\n        return {\n            used_letters_var: used_letters,\n            used_letters_box: \", \".join(used_letters),\n            hangman: answer\n        }\n    btn.click(\n        guess_letter, \n        [input_letter, used_letters_var],\n        [used_letters_var, used_letters_box, hangman]\n        )\ndemo.launch()\n
\n\n

\n\n

Let's see how we do each of the 3 steps listed above in this game:

\n\n
    \n
  1. We store the used letters in used_letters_var. In the constructor of State, we set the initial value of this to [], an empty list.
  2. \n
  3. In btn.click(), we have a reference to used_letters_var in both the inputs and outputs.
  4. \n
  5. In guess_letter, we pass the value of this State to used_letters, and then return an updated value of this State in the return statement.
  6. \n
\n\n

With more complex apps, you will likely have many State variables storing session state in a single Blocks app.

\n\n

Learn more about State in the docs.

\n", "tags": [], "spaces": [], "url": "/guides/state-in-blocks/", "contributor": null}, {"name": "custom-CSS-and-JS", "category": "building-with-blocks", "pretty_category": "Building With Blocks", "guide_index": 4, "absolute_index": 11, "pretty_name": "Custom CSS And JS", "content": "# Custom JS and CSS\n\nThis guide covers how to style Blocks with more flexibility, as well as adding Javascript code to event listeners. \n\n**Warning**: The use of query selectors in custom JS and CSS is *not* guaranteed to work across Gradio versions as the Gradio HTML DOM may change. We recommend using query selectors sparingly.\n\n## Custom CSS\n\nGradio themes are the easiest way to customize the look and feel of your app. You can choose from a variety of themes, or create your own. To do so, pass the `theme=` kwarg to the `Blocks` constructor. For example:\n\n```python\nwith gr.Blocks(theme=gr.themes.Glass()):\n ...\n```\n\nGradio comes with a set of prebuilt themes which you can load from `gr.themes.*`. You can extend these themes or create your own themes from scratch - see the [Theming guide](/guides/theming-guide) for more details.\n\nFor additional styling ability, you can pass any CSS to your app using the `css=` kwarg.\n\nThe base class for the Gradio app is `gradio-container`, so here's an example that changes the background color of the Gradio app:\n```python\nwith gr.Blocks(css=\".gradio-container {background-color: red}\") as demo:\n ...\n```\n\nIf you'd like to reference external files in your css, preface the file path (which can be a relative or absolute path) with `\"file=\"`, for example:\n\n```python\nwith gr.Blocks(css=\".gradio-container {background: url('file=clouds.jpg')}\") as demo:\n ...\n```\n\nYou can also pass the filepath to a CSS file to the `css` argument. \n\n## The `elem_id` and `elem_classes` Arguments\n\nYou can `elem_id` to add an HTML element `id` to any component, and `elem_classes` to add a class or list of classes. This will allow you to select elements more easily with CSS. This approach is also more likely to be stable across Gradio versions as built-in class names or ids may change (however, as mentioned in the warning above, we cannot guarantee complete compatibility between Gradio versions if you use custom CSS as the DOM elements may themselves change).\n\n```python\ncss = \"\"\"\n#warning {background-color: #FFCCCB} \n.feedback textarea {font-size: 24px !important}\n\"\"\"\n\nwith gr.Blocks(css=css) as demo:\n box1 = gr.Textbox(value=\"Good Job\", elem_classes=\"feedback\")\n box2 = gr.Textbox(value=\"Failure\", elem_id=\"warning\", elem_classes=\"feedback\")\n```\n\nThe CSS `#warning` ruleset will only target the second Textbox, while the `.feedback` ruleset will target both. Note that when targeting classes, you might need to put the `!important` selector to override the default Gradio styles.\n\n## Custom JS\n\nEvent listeners have a `_js` argument that can take a Javascript function as a string and treat it just like a Python event listener function. You can pass both a Javascript function and a Python function (in which case the Javascript function is run first) or only Javascript (and set the Python `fn` to `None`). 
Take a look at the code below:\n\n```python\nimport gradio as gr\n\nblocks = gr.Blocks()\n\nwith blocks as demo:\n subject = gr.Textbox(placeholder=\"subject\")\n verb = gr.Radio([\"ate\", \"loved\", \"hated\"])\n object = gr.Textbox(placeholder=\"object\")\n\n with gr.Row():\n btn = gr.Button(\"Create sentence.\")\n reverse_btn = gr.Button(\"Reverse sentence.\")\n foo_bar_btn = gr.Button(\"Append foo\")\n reverse_then_to_the_server_btn = gr.Button(\n \"Reverse sentence and send to server.\"\n )\n\n def sentence_maker(w1, w2, w3):\n return f\"{w1} {w2} {w3}\"\n\n output1 = gr.Textbox(label=\"output 1\")\n output2 = gr.Textbox(label=\"verb\")\n output3 = gr.Textbox(label=\"verb reversed\")\n output4 = gr.Textbox(label=\"front end process and then send to backend\")\n\n btn.click(sentence_maker, [subject, verb, object], output1)\n reverse_btn.click(\n None, [subject, verb, object], output2, _js=\"(s, v, o) => o + ' ' + v + ' ' + s\"\n )\n verb.change(lambda x: x, verb, output3, _js=\"(x) => [...x].reverse().join('')\")\n foo_bar_btn.click(None, [], subject, _js=\"(x) => x + ' foo'\")\n\n reverse_then_to_the_server_btn.click(\n sentence_maker,\n [subject, verb, object],\n output4,\n _js=\"(s, v, o) => [s, v, o].map(x => [...x].reverse().join(''))\",\n )\n\ndemo.launch()\n\n```\n", "html": "

Custom JS and CSS

\n\n

This guide covers how to style Blocks with more flexibility, as well as adding Javascript code to event listeners.

\n\n

Warning: The use of query selectors in custom JS and CSS is not guaranteed to work across Gradio versions as the Gradio HTML DOM may change. We recommend using query selectors sparingly.

\n\n

Custom CSS

\n\n

Gradio themes are the easiest way to customize the look and feel of your app. You can choose from a variety of themes, or create your own. To do so, pass the theme= kwarg to the Blocks constructor. For example:

\n\n
with gr.Blocks(theme=gr.themes.Glass()):\n    ...\n
\n\n

Gradio comes with a set of prebuilt themes which you can load from gr.themes.*. You can extend these themes or create your own themes from scratch - see the Theming guide for more details.

\n\n
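As a hedged sketch of light customization (the hue name is just an example), the prebuilt theme constructors accept arguments such as primary_hue:

```python
import gradio as gr

# Start from a prebuilt theme and tweak one of its parameters.
with gr.Blocks(theme=gr.themes.Soft(primary_hue="emerald")) as demo:
    gr.Button("Themed button")

demo.launch()
```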

For additional styling ability, you can pass any CSS to your app using the css= kwarg.

\n\n

The base class for the Gradio app is gradio-container, so here's an example that changes the background color of the Gradio app:

\n\n
with gr.Blocks(css=\".gradio-container {background-color: red}\") as demo:\n    ...\n
\n\n

If you'd like to reference external files in your CSS, preface the file path (which can be a relative or absolute path) with \"file=\", for example:

\n\n
with gr.Blocks(css=\".gradio-container {background: url('file=clouds.jpg')}\") as demo:\n    ...\n
\n\n

You can also pass the filepath to a CSS file to the css argument.

\n\n
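For example (assuming a stylesheet at a hypothetical relative path), the same css keyword accepts a file path instead of a CSS string:

```python
import gradio as gr

# styles.css is a hypothetical file living next to the app script.
with gr.Blocks(css="styles.css") as demo:
    ...
```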

The elem_id and elem_classes Arguments

\n\n

You can use elem_id to add an HTML element id to any component, and elem_classes to add a class or list of classes. This will allow you to select elements more easily with CSS. This approach is also more likely to be stable across Gradio versions as built-in class names or ids may change (however, as mentioned in the warning above, we cannot guarantee complete compatibility between Gradio versions if you use custom CSS as the DOM elements may themselves change).

\n\n
css = \"\"\"\n#warning {background-color: #FFCCCB} \n.feedback textarea {font-size: 24px !important}\n\"\"\"\n\nwith gr.Blocks(css=css) as demo:\n    box1 = gr.Textbox(value=\"Good Job\", elem_classes=\"feedback\")\n    box2 = gr.Textbox(value=\"Failure\", elem_id=\"warning\", elem_classes=\"feedback\")\n
\n\n

The CSS #warning ruleset will only target the second Textbox, while the .feedback ruleset will target both. Note that when targeting classes, you might need to add the !important flag to override the default Gradio styles.

\n\n

Custom JS

\n\n

Event listeners have a _js argument that can take a Javascript function as a string and treat it just like a Python event listener function. You can pass both a Javascript function and a Python function (in which case the Javascript function is run first) or only Javascript (and set the Python fn to None). Take a look at the code below:

\n\n
import gradio as gr\n\nblocks = gr.Blocks()\n\nwith blocks as demo:\n    subject = gr.Textbox(placeholder=\"subject\")\n    verb = gr.Radio([\"ate\", \"loved\", \"hated\"])\n    object = gr.Textbox(placeholder=\"object\")\n\n    with gr.Row():\n        btn = gr.Button(\"Create sentence.\")\n        reverse_btn = gr.Button(\"Reverse sentence.\")\n        foo_bar_btn = gr.Button(\"Append foo\")\n        reverse_then_to_the_server_btn = gr.Button(\n            \"Reverse sentence and send to server.\"\n        )\n\n    def sentence_maker(w1, w2, w3):\n        return f\"{w1} {w2} {w3}\"\n\n    output1 = gr.Textbox(label=\"output 1\")\n    output2 = gr.Textbox(label=\"verb\")\n    output3 = gr.Textbox(label=\"verb reversed\")\n    output4 = gr.Textbox(label=\"front end process and then send to backend\")\n\n    btn.click(sentence_maker, [subject, verb, object], output1)\n    reverse_btn.click(\n        None, [subject, verb, object], output2, _js=\"(s, v, o) => o + ' ' + v + ' ' + s\"\n    )\n    verb.change(lambda x: x, verb, output3, _js=\"(x) => [...x].reverse().join('')\")\n    foo_bar_btn.click(None, [], subject, _js=\"(x) => x + ' foo'\")\n\n    reverse_then_to_the_server_btn.click(\n        sentence_maker,\n        [subject, verb, object],\n        output4,\n        _js=\"(s, v, o) => [s, v, o].map(x => [...x].reverse().join(''))\",\n    )\n\ndemo.launch()\n\n
\n\n

\n", "tags": [], "spaces": [], "url": "/guides/custom-CSS-and-JS/", "contributor": null}, {"name": "using-blocks-like-functions", "category": "building-with-blocks", "pretty_category": "Building With Blocks", "guide_index": 5, "absolute_index": 12, "pretty_name": "Using Blocks Like Functions", "content": "# Using Gradio Blocks Like Functions\n\n\n\n**Prerequisite**: This Guide builds on the Blocks Introduction. Make sure to [read that guide first](https://gradio.app/guides/quickstart/#blocks-more-flexibility-and-control).\n\n## Introduction\n\nDid you know that apart from being a full-stack machine learning demo, a Gradio Blocks app is also a regular-old python function!?\n\nThis means that if you have a gradio Blocks (or Interface) app called `demo`, you can use `demo` like you would any python function.\n\nSo doing something like `output = demo(\"Hello\", \"friend\")` will run the first event defined in `demo` on the inputs \"Hello\" and \"friend\" and store it\nin the variable `output`.\n\nIf I put you to sleep \ud83e\udd71, please bear with me! By using apps like functions, you can seamlessly compose Gradio apps.\nThe following section will show how.\n\n## Treating Blocks like functions\n\nLet's say we have the following demo that translates english text to german text. \n\n```python\nimport gradio as gr\n\nfrom transformers import pipeline\n\npipe = pipeline(\"translation\", model=\"t5-base\")\n\n\ndef translate(text):\n return pipe(text)[0][\"translation_text\"]\n\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n english = gr.Textbox(label=\"English text\")\n translate_btn = gr.Button(value=\"Translate\")\n with gr.Column():\n german = gr.Textbox(label=\"German Text\")\n\n translate_btn.click(translate, inputs=english, outputs=german, api_name=\"translate-to-german\")\n examples = gr.Examples(examples=[\"I went to the supermarket yesterday.\", \"Helen is a good swimmer.\"],\n inputs=[english])\n\ndemo.launch()\n```\n\nI already went ahead and hosted it in Hugging Face spaces at [gradio/english_translator](https://huggingface.co/spaces/gradio/english_translator).\n\nYou can see the demo below as well:\n\n\n\nNow, let's say you have an app that generates english text, but you wanted to additionally generate german text.\n\nYou could either:\n\n1. Copy the source code of my english-to-german translation and paste it in your app.\n\n2. 
Load my english-to-german translation in your app and treat it like a normal python function.\n\nOption 1 technically always works, but it often introduces unwanted complexity.\n\nOption 2 lets you borrow the functionality you want without tightly coupling our apps.\n\nAll you have to do is call the `Blocks.load` class method in your source file.\nAfter that, you can use my translation app like a regular python function!\n\nThe following code snippet and demo shows how to use `Blocks.load`.\n\nNote that the variable `english_translator` is my english to german app, but its used in `generate_text` like a regular function.\n\n```python\nimport gradio as gr\n\nfrom transformers import pipeline\n\nenglish_translator = gr.Blocks.load(name=\"spaces/gradio/english_translator\")\nenglish_generator = pipeline(\"text-generation\", model=\"distilgpt2\")\n\n\ndef generate_text(text):\n english_text = english_generator(text)[0][\"generated_text\"]\n german_text = english_translator(english_text)\n return english_text, german_text\n\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n seed = gr.Text(label=\"Input Phrase\")\n with gr.Column():\n english = gr.Text(label=\"Generated English Text\")\n german = gr.Text(label=\"Generated German Text\")\n btn = gr.Button(\"Generate\")\n btn.click(generate_text, inputs=[seed], outputs=[english, german])\n gr.Examples([\"My name is Clara and I am\"], inputs=[seed])\n\ndemo.launch()\n```\n\n\n\n## How to control which function in the app to use\n\nIf the app you are loading defines more than one function, you can specify which function to use\nwith the `fn_index` and `api_name` parameters.\n\nIn the code for our english to german demo, you'll see the following line:\n\n```python\ntranslate_btn.click(translate, inputs=english, outputs=german, api_name=\"translate-to-german\")\n```\n\nThe `api_name` gives this function a unique name in our app. You can use this name to tell gradio which\nfunction in the upstream space you want to use:\n\n```python\nenglish_generator(text, api_name=\"translate-to-german\")[0][\"generated_text\"]\n```\n\nYou can also use the `fn_index` parameter.\nImagine my app also defined an english to spanish translation function.\nIn order to use it in our text generation app, we would use the following code:\n\n```python\nenglish_generator(text, fn_index=1)[0][\"generated_text\"]\n```\n\nFunctions in gradio spaces are zero-indexed, so since the spanish translator would be the second function in my space,\nyou would use index 1. \n\n## Parting Remarks\n\nWe showed how treating a Blocks app like a regular python helps you compose functionality across different apps.\nAny Blocks app can be treated like a function, but a powerful pattern is to `load` an app hosted on \n[Hugging Face Spaces](https://huggingface.co/spaces) prior to treating it like a function in your own app.\nYou can also load models hosted on the [Hugging Face Model Hub](https://huggingface.co/models) - see the [Using Hugging Face Integrations](/using_hugging_face_integrations) guide for an example.\n\n### Happy building! \u2692\ufe0f\n", "html": "

Using Gradio Blocks Like Functions

\n\n

Prerequisite: This Guide builds on the Blocks Introduction. Make sure to read that guide first.

\n\n

Introduction

\n\n

Did you know that apart from being a full-stack machine learning demo, a Gradio Blocks app is also a regular-old python function!?

\n\n

This means that if you have a gradio Blocks (or Interface) app called demo, you can use demo like you would any python function.

\n\n

So doing something like output = demo(\"Hello\", \"friend\") will run the first event defined in demo on the inputs \"Hello\" and \"friend\" and store it\nin the variable output.

\n\n
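
For example, here's a minimal sketch (the components and the combining function are placeholders) of a Blocks app being called like a plain python function:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    first = gr.Textbox()\n    second = gr.Textbox()\n    combined = gr.Textbox()\n    btn = gr.Button(\"Combine\")\n    # the first (and only) event defined in this app\n    btn.click(lambda a, b: f\"{a} {b}\", [first, second], combined)\n\noutput = demo(\"Hello\", \"friend\")  # runs that first event directly\nprint(output)\n
\n\n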

If I put you to sleep \ud83e\udd71, please bear with me! By using apps like functions, you can seamlessly compose Gradio apps.\nThe following section will show how.

\n\n

Treating Blocks like functions

\n\n

Let's say we have the following demo that translates english text to german text.

\n\n
import gradio as gr\n\nfrom transformers import pipeline\n\npipe = pipeline(\"translation\", model=\"t5-base\")\n\n\ndef translate(text):\n    return pipe(text)[0][\"translation_text\"]\n\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        with gr.Column():\n            english = gr.Textbox(label=\"English text\")\n            translate_btn = gr.Button(value=\"Translate\")\n        with gr.Column():\n            german = gr.Textbox(label=\"German Text\")\n\n    translate_btn.click(translate, inputs=english, outputs=german, api_name=\"translate-to-german\")\n    examples = gr.Examples(examples=[\"I went to the supermarket yesterday.\", \"Helen is a good swimmer.\"],\n                           inputs=[english])\n\ndemo.launch()\n
\n\n

I already went ahead and hosted it in Hugging Face spaces at gradio/english_translator.

\n\n

You can see the demo below as well:

\n\n

\n\n

Now, let's say you have an app that generates english text, but you wanted to additionally generate german text.

\n\n

You could either:

\n\n
    \n
  1. Copy the source code of my english-to-german translation and paste it in your app.

  2. \n
  3. Load my english-to-german translation in your app and treat it like a normal python function.

  4. \n
\n\n

Option 1 technically always works, but it often introduces unwanted complexity.

\n\n

Option 2 lets you borrow the functionality you want without tightly coupling our apps.

\n\n

All you have to do is call the Blocks.load class method in your source file.\nAfter that, you can use my translation app like a regular python function!

\n\n

The following code snippet and demo show how to use Blocks.load.

\n\n

Note that the variable english_translator is my english to german app, but it's used in generate_text like a regular function.

\n\n
import gradio as gr\n\nfrom transformers import pipeline\n\nenglish_translator = gr.Blocks.load(name=\"spaces/gradio/english_translator\")\nenglish_generator = pipeline(\"text-generation\", model=\"distilgpt2\")\n\n\ndef generate_text(text):\n    english_text = english_generator(text)[0][\"generated_text\"]\n    german_text = english_translator(english_text)\n    return english_text, german_text\n\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        with gr.Column():\n            seed = gr.Text(label=\"Input Phrase\")\n        with gr.Column():\n            english = gr.Text(label=\"Generated English Text\")\n            german = gr.Text(label=\"Generated German Text\")\n    btn = gr.Button(\"Generate\")\n    btn.click(generate_text, inputs=[seed], outputs=[english, german])\n    gr.Examples([\"My name is Clara and I am\"], inputs=[seed])\n\ndemo.launch()\n
\n\n

\n\n

How to control which function in the app to use

\n\n

If the app you are loading defines more than one function, you can specify which function to use\nwith the fn_index and api_name parameters.

\n\n

In the code for our english to german demo, you'll see the following line:

\n\n
translate_btn.click(translate, inputs=english, outputs=german, api_name=\"translate-to-german\")\n
\n\n

The api_name gives this function a unique name in our app. You can use this name to tell gradio which\nfunction in the upstream space you want to use:

\n\n
english_translator(text, api_name=\"translate-to-german\")\n
\n\n

You can also use the fn_index parameter.\nImagine my app also defined an english to spanish translation function.\nIn order to use it in our text generation app, we would use the following code:

\n\n
english_translator(text, fn_index=1)\n
\n\n

Functions in gradio spaces are zero-indexed, so since the spanish translator would be the second function in my space,\nyou would use index 1.

\n\n

Parting Remarks

\n\n

We showed how treating a Blocks app like a regular python function helps you compose functionality across different apps.\nAny Blocks app can be treated like a function, but a powerful pattern is to load an app hosted on \nHugging Face Spaces prior to treating it like a function in your own app.\nYou can also load models hosted on the Hugging Face Model Hub - see the Using Hugging Face Integrations guide for an example.

\n\n

Happy building! \u2692\ufe0f

\n", "tags": ["TRANSLATION", "HUB", "SPACES"], "spaces": [], "url": "/guides/using-blocks-like-functions/", "contributor": null}]}, {"category": "Chatbots", "guides": [{"name": "creating-a-chatbot-fast", "category": "chatbots", "pretty_category": "Chatbots", "guide_index": 1, "absolute_index": 13, "pretty_name": "Creating A Chatbot Fast", "content": "# How to Create a Chatbot with Gradio\n\n\n\n## Introduction\n\nChatbots are a popular application of large language models. Using `gradio`, you can easily build a demo of your chatbot model and share that with your users, or try it yourself using an intuitive chatbot UI.\n\nThis tutorial uses `gr.ChatInterface()`, which is a high-level abstraction that allows you to create your chatbot UI fast, often with a single line of code. The chatbot interface that we create will look something like this:\n\n\n\nWe'll start with a couple of simple examples, and then show how to use `gr.ChatInterface()` with real language models from several popular APIs and libraries, including `langchain`, `openai`, and Hugging Face. \n\n**Prerequisites**: please make sure you are using the **latest version** version of Gradio: \n\n```bash\n$ pip install --upgrade gradio\n```\n\n## Defining a chat function\n\nWhen working with `gr.ChatInterface()`, the first thing you should do is define your chat function. Your chat function should take two arguments: `message` and then `history` (the arguments can be named anything, but must be in this order).\n\n* `message`: a `str` representing the user's input.\n* `history`: a `list` of `list` representing the conversations up until that point. Each inner list consists of two `str` representing a pair: `[user input, bot response]`. \n\nYour function should return a single string response, which is the bot's response to the particular user input `message`. Your function can take into account the `history` of messages, as well as the current message.\n\nLet's take a look at a few examples.\n\n## Example: a chatbot that responds yes or no\n\nLet's write a chat function that responds `Yes` or `No` randomly.\n\nHere's our chat function:\n\n```python\nimport random\n\ndef random_response(message, history):\n return random.choice([\"Yes\", \"No\"])\n```\n\nNow, we can plug this into `gr.ChatInterface()` and call the `.launch()` method to create the web interface:\n\n```python\nimport gradio as gr\n\ngr.ChatInterface(random_response).launch()\n```\n\nThat's it! Here's our running demo, try it out:\n\n\n\n## Another example using the user's input and history\n\nOf course, the previous example was very simplistic, it didn't even take user input or the previous history into account! Here's another simple example showing how to incorporate a user's input as well as the history.\n\n```python\nimport random\nimport gradio as gr\n\ndef alternatingly_agree(message, history):\n if len(history) % 2 == 0:\n return f\"Yes, I do think that '{message}'\"\n else:\n return \"I don't think so\"\n\ngr.ChatInterface(alternatingly_agree).launch()\n```\n\n## Streaming chatbots \n\nIf in your chat function, you use `yield` to generate a sequence of responses, you'll end up with a streaming chatbot. It's that simple!\n\n```python\nimport time\nimport gradio as gr\n\ndef slow_echo(message, history):\n for i in range(len(message)):\n time.sleep(0.3)\n yield \"You typed: \" + message[: i+1]\n\ngr.ChatInterface(slow_echo).queue().launch()\n```\n\nNotice that we've [enabled queuing](/guides/key-features#queuing), which is required to use generator functions. 
While the response is streaming, the \"Submit\" button turns into a \"Stop\" button that can be used to stop the generator function. You can customize the appearance and behavior of the \"Stop\" button using the `stop_btn` parameter.\n\n## Customizing your chatbot\n\nIf you're familiar with Gradio's `Interface` class, the `gr.ChatInterface` includes many of the same arguments that you can use to customize the look and feel of your Chatbot. For example, you can:\n\n* add a title and description above your chatbot using `title` and `description` arguments.\n* add a theme or custom css using `theme` and `css` arguments respectively.\n* add `examples` and even enable `cache_examples`, which make it easier for users to try it out .\n* You can change the text or disable each of the buttons that appear in the chatbot interface: `submit_btn`, `retry_btn`, `undo_btn`, `clear_btn`.\n\nIf you want to customize the `gr.Chatbot` or `gr.Textbox` that compose the `ChatInterface`, then you can pass in your own chatbot or textbox as well. Here's an example of how we can use these parameters:\n\n\n```python\nimport gradio as gr\n\ndef yes_man(message, history):\n if message.endswith(\"?\"):\n return \"Yes\"\n else:\n return \"Ask me anything!\"\n\ngr.ChatInterface(\n yes_man,\n chatbot=gr.Chatbot(height=300),\n textbox=gr.Textbox(placeholder=\"Ask me a yes or no question\", container=False, scale=7),\n title=\"Yes Man\",\n description=\"Ask Yes Man any question\",\n theme=\"soft\",\n examples=[\"Hello\", \"Am I cool?\", \"Are tomatoes vegetables?\"],\n cache_examples=True,\n retry_btn=None,\n undo_btn=\"Delete Previous\",\n clear_btn=\"Clear\",\n).launch()\n```\n\n## Additional Inputs\n\nYou may want to add additional parameters to your chatbot and expose them to your users through the Chatbot UI. For example, suppose you want to add a textbox for a system prompt, or a slider that sets the number of tokens in the chatbot's response. The `ChatInterface` class supports an `additional_inputs` parameter which can be used to add additional input components.\n\nThe `additional_inputs` parameters accepts a component or a list of components. You can pass the component instances directly, or use their string shortcuts (e.g. `\"textbox\"` instead of `gr.Textbox()`). If you pass in component instances, and they have *not* already been rendered, then the components will appear underneath the chatbot (and any examples) within a `gr.Accordion()`. You can set the label of this accordion using the `additional_inputs_accordion_name` parameter. \n\nHere's a complete example:\n\n```python\nimport gradio as gr\nimport time\n\ndef echo(message, history, system_prompt, tokens):\n response = f\"System prompt: {system_prompt}\\n Message: {message}.\"\n for i in range(min(len(response), int(tokens))):\n time.sleep(0.05)\n yield response[: i+1]\n\ndemo = gr.ChatInterface(echo, \n additional_inputs=[\n gr.Textbox(\"You are helpful AI.\", label=\"System Prompt\"), \n gr.Slider(10, 100)\n ]\n )\n\nif __name__ == \"__main__\":\n demo.queue().launch()\n```\n\nIf the components you pass into the `additional_inputs` have already been rendered in a parent `gr.Blocks()`, then they will *not* be re-rendered in the accordion. This provides flexibility in deciding where to lay out the input components. 
In the example below, we position the `gr.Textbox()` on top of the Chatbot UI, while keeping the slider underneath.\n\n```python\nimport gradio as gr\nimport time\n\ndef echo(message, history, system_prompt, tokens):\n response = f\"System prompt: {system_prompt}\\n Message: {message}.\"\n for i in range(min(len(response), int(tokens))):\n time.sleep(0.05)\n yield response[: i+1]\n\nwith gr.Blocks() as demo:\n system_prompt = gr.Textbox(\"You are helpful AI.\", label=\"System Prompt\")\n slider = gr.Slider(10, 100, render=False)\n \n gr.ChatInterface(\n echo, additional_inputs=[system_prompt, slider]\n )\n\ndemo.queue().launch()\n```\n\nIf you need to create something even more custom, then its best to construct the chatbot UI using the low-level `gr.Blocks()` API. We have [a dedicated guide for that here](/guides/creating-a-custom-chatbot-with-blocks).\n\n## Using your chatbot via an API\n\nOnce you've built your Gradio chatbot and are hosting it on [Hugging Face Spaces](https://hf.space) or somewhere else, then you can query it with a simple API at the `/chat` endpoint. The endpoint just expects the user's message (and potentially additional inputs if you have set any using the `additional_inputs` parameter), and will return the response, internally keeping track of the messages sent so far.\n\n[](https://github.com/gradio-app/gradio/assets/1778297/7b10d6db-6476-4e2e-bebd-ecda802c3b8f)\n\nTo use the endpoint, you should use either the [Gradio Python Client](/guides/getting-started-with-the-python-client) or the [Gradio JS client](/guides/getting-started-with-the-js-client).\n\n## A `langchain` example\n\nNow, let's actually use the `gr.ChatInterface` with some real large language models. We'll start by using `langchain` on top of `openai` to build a general-purpose streaming chatbot application in 19 lines of code. You'll need to have an OpenAI key for this example (keep reading for the free, open-source equivalent!)\n\n```python\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.schema import AIMessage, HumanMessage\nimport openai\nimport gradio as gr\n\nos.envrion[\"OPENAI_API_KEY\"] = \"sk-...\" # Replace with your key\n\nllm = ChatOpenAI(temperature=1.0, model='gpt-3.5-turbo-0613')\n\ndef predict(message, history):\n history_langchain_format = []\n for human, ai in history:\n history_langchain_format.append(HumanMessage(content=human))\n history_langchain_format.append(AIMessage(content=ai))\n history_langchain_format.append(HumanMessage(content=message))\n gpt_response = llm(history_langchain_format)\n return gpt_response.content\n\ngr.ChatInterface(predict).launch() \n```\n\n## A streaming example using `openai`\n\nOf course, we could also use the `openai` library directy. 
Here a similar example, but this time with streaming results as well:\n\n\n```python\nimport openai\nimport gradio as gr\n\nopenai.api_key = \"sk-...\" # Replace with your key\n\ndef predict(message, history):\n history_openai_format = []\n for human, assistant in history:\n history_openai_format.append({\"role\": \"user\", \"content\": human })\n history_openai_format.append({\"role\": \"assistant\", \"content\":assistant})\n history_openai_format.append({\"role\": \"user\", \"content\": message})\n\n response = openai.ChatCompletion.create(\n model='gpt-3.5-turbo',\n messages= history_openai_format, \n temperature=1.0,\n stream=True\n )\n \n partial_message = \"\"\n for chunk in response:\n if len(chunk['choices'][0]['delta']) != 0:\n partial_message = partial_message + chunk['choices'][0]['delta']['content']\n yield partial_message \n\ngr.ChatInterface(predict).queue().launch() \n```\n\n## Example using a local, open-source LLM with Hugging Face\n\nOf course, in many cases you want to run a chatbot locally. Here's the equivalent example using Together's RedePajama model, from Hugging Face (this requires you to have a GPU with CUDA).\n\n```python\nimport gradio as gr\nimport torch\nfrom transformers import AutoModelForCausalLM, AutoTokenizer, StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer\nfrom threading import Thread\n\ntokenizer = AutoTokenizer.from_pretrained(\"togethercomputer/RedPajama-INCITE-Chat-3B-v1\")\nmodel = AutoModelForCausalLM.from_pretrained(\"togethercomputer/RedPajama-INCITE-Chat-3B-v1\", torch_dtype=torch.float16)\nmodel = model.to('cuda:0')\n\nclass StopOnTokens(StoppingCriteria):\n def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:\n stop_ids = [29, 0]\n for stop_id in stop_ids:\n if input_ids[0][-1] == stop_id:\n return True\n return False\n\ndef predict(message, history): \n\n history_transformer_format = history + [[message, \"\"]]\n stop = StopOnTokens()\n\n messages = \"\".join([\"\".join([\"\\n:\"+item[0], \"\\n:\"+item[1]]) #curr_system_message + \n for item in history_transformer_format])\n \n model_inputs = tokenizer([messages], return_tensors=\"pt\").to(\"cuda\")\n streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)\n generate_kwargs = dict(\n model_inputs,\n streamer=streamer,\n max_new_tokens=1024,\n do_sample=True,\n top_p=0.95,\n top_k=1000,\n temperature=1.0,\n num_beams=1,\n stopping_criteria=StoppingCriteriaList([stop])\n )\n t = Thread(target=model.generate, kwargs=generate_kwargs)\n t.start()\n\n partial_message = \"\"\n for new_token in streamer:\n if new_token != '<':\n partial_message += new_token\n yield partial_message \n \n\ngr.ChatInterface(predict).queue().launch()\n```\n\nWith those examples, you should be all set to create your own Gradio Chatbot demos soon! For building even more custom Chatbot applications, check out [a dedicated guide](/guides/creating-a-custom-chatbot-with-blocks) using the low-level `gr.Blocks()` API.", "html": "

How to Create a Chatbot with Gradio

\n\n

Introduction

\n\n

Chatbots are a popular application of large language models. Using gradio, you can easily build a demo of your chatbot model and share that with your users, or try it yourself using an intuitive chatbot UI.

\n\n

This tutorial uses gr.ChatInterface(), which is a high-level abstraction that allows you to create your chatbot UI fast, often with a single line of code. The chatbot interface that we create will look something like this:

\n\n

\n\n

We'll start with a couple of simple examples, and then show how to use gr.ChatInterface() with real language models from several popular APIs and libraries, including langchain, openai, and Hugging Face.

\n\n

Prerequisites: please make sure you are using the latest version of Gradio:

\n\n
$ pip install --upgrade gradio\n
\n\n

Defining a chat function

\n\n

When working with gr.ChatInterface(), the first thing you should do is define your chat function. Your chat function should take two arguments: message and then history (the arguments can be named anything, but must be in this order).

\n\n
    \n
  • message: a str representing the user's input.
  • \n
  • history: a list of list representing the conversations up until that point. Each inner list consists of two str representing a pair: [user input, bot response].
  • \n
\n\n

Your function should return a single string response, which is the bot's response to the particular user input message. Your function can take into account the history of messages, as well as the current message.

\n\n

Let's take a look at a few examples.

\n\n

Example: a chatbot that responds yes or no

\n\n

Let's write a chat function that responds Yes or No randomly.

\n\n

Here's our chat function:

\n\n
import random\n\ndef random_response(message, history):\n    return random.choice([\"Yes\", \"No\"])\n
\n\n

Now, we can plug this into gr.ChatInterface() and call the .launch() method to create the web interface:

\n\n
import gradio as gr\n\ngr.ChatInterface(random_response).launch()\n
\n\n

That's it! Here's our running demo, try it out:

\n\n

\n\n

Another example using the user's input and history

\n\n

Of course, the previous example was very simplistic: it didn't even take user input or the previous history into account! Here's another simple example showing how to incorporate a user's input as well as the history.

\n\n
import random\nimport gradio as gr\n\ndef alternatingly_agree(message, history):\n    if len(history) % 2 == 0:\n        return f\"Yes, I do think that '{message}'\"\n    else:\n        return \"I don't think so\"\n\ngr.ChatInterface(alternatingly_agree).launch()\n
\n\n

Streaming chatbots

\n\n

If in your chat function, you use yield to generate a sequence of responses, you'll end up with a streaming chatbot. It's that simple!

\n\n
import time\nimport gradio as gr\n\ndef slow_echo(message, history):\n    for i in range(len(message)):\n        time.sleep(0.3)\n        yield \"You typed: \" + message[: i+1]\n\ngr.ChatInterface(slow_echo).queue().launch()\n
\n\n

Notice that we've enabled queuing, which is required to use generator functions. While the response is streaming, the \"Submit\" button turns into a \"Stop\" button that can be used to stop the generator function. You can customize the appearance and behavior of the \"Stop\" button using the stop_btn parameter.

\n\n
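
As a small sketch (reusing the slow_echo function above; the label text is arbitrary), you could relabel that button via stop_btn:

\n\n
gr.ChatInterface(slow_echo, stop_btn=\"Abort\").queue().launch()\n
\n\n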

Customizing your chatbot

\n\n

If you're familiar with Gradio's Interface class, the gr.ChatInterface includes many of the same arguments that you can use to customize the look and feel of your Chatbot. For example, you can:

\n\n
    \n
  • add a title and description above your chatbot using title and description arguments.
  • \n
  • add a theme or custom css using theme and css arguments respectively.
  • \n
  • add examples and even enable cache_examples, which makes it easier for users to try it out.
  • \n
  • You can change the text or disable each of the buttons that appear in the chatbot interface: submit_btn, retry_btn, undo_btn, clear_btn.
  • \n
\n\n

If you want to customize the gr.Chatbot or gr.Textbox that compose the ChatInterface, then you can pass in your own chatbot or textbox as well. Here's an example of how we can use these parameters:

\n\n
import gradio as gr\n\ndef yes_man(message, history):\n    if message.endswith(\"?\"):\n        return \"Yes\"\n    else:\n        return \"Ask me anything!\"\n\ngr.ChatInterface(\n    yes_man,\n    chatbot=gr.Chatbot(height=300),\n    textbox=gr.Textbox(placeholder=\"Ask me a yes or no question\", container=False, scale=7),\n    title=\"Yes Man\",\n    description=\"Ask Yes Man any question\",\n    theme=\"soft\",\n    examples=[\"Hello\", \"Am I cool?\", \"Are tomatoes vegetables?\"],\n    cache_examples=True,\n    retry_btn=None,\n    undo_btn=\"Delete Previous\",\n    clear_btn=\"Clear\",\n).launch()\n
\n\n

Additional Inputs

\n\n

You may want to add additional parameters to your chatbot and expose them to your users through the Chatbot UI. For example, suppose you want to add a textbox for a system prompt, or a slider that sets the number of tokens in the chatbot's response. The ChatInterface class supports an additional_inputs parameter which can be used to add additional input components.

\n\n

The additional_inputs parameter accepts a component or a list of components. You can pass the component instances directly, or use their string shortcuts (e.g. \"textbox\" instead of gr.Textbox()). If you pass in component instances, and they have not already been rendered, then the components will appear underneath the chatbot (and any examples) within a gr.Accordion(). You can set the label of this accordion using the additional_inputs_accordion_name parameter.

\n\n

Here's a complete example:

\n\n
import gradio as gr\nimport time\n\ndef echo(message, history, system_prompt, tokens):\n    response = f\"System prompt: {system_prompt}\\n Message: {message}.\"\n    for i in range(min(len(response), int(tokens))):\n        time.sleep(0.05)\n        yield response[: i+1]\n\ndemo = gr.ChatInterface(echo, \n                        additional_inputs=[\n                            gr.Textbox(\"You are helpful AI.\", label=\"System Prompt\"), \n                            gr.Slider(10, 100)\n                        ]\n                       )\n\nif __name__ == \"__main__\":\n    demo.queue().launch()\n
\n\n

If the components you pass into the additional_inputs have already been rendered in a parent gr.Blocks(), then they will not be re-rendered in the accordion. This provides flexibility in deciding where to lay out the input components. In the example below, we position the gr.Textbox() on top of the Chatbot UI, while keeping the slider underneath.

\n\n
import gradio as gr\nimport time\n\ndef echo(message, history, system_prompt, tokens):\n    response = f\"System prompt: {system_prompt}\\n Message: {message}.\"\n    for i in range(min(len(response), int(tokens))):\n        time.sleep(0.05)\n        yield response[: i+1]\n\nwith gr.Blocks() as demo:\n    system_prompt = gr.Textbox(\"You are helpful AI.\", label=\"System Prompt\")\n    slider = gr.Slider(10, 100, render=False)\n\n    gr.ChatInterface(\n        echo, additional_inputs=[system_prompt, slider]\n    )\n\ndemo.queue().launch()\n
\n\n

If you need to create something even more custom, then it's best to construct the chatbot UI using the low-level gr.Blocks() API. We have a dedicated guide for that here.

\n\n

Using your chatbot via an API

\n\n

Once you've built your Gradio chatbot and are hosting it on Hugging Face Spaces or somewhere else, then you can query it with a simple API at the /chat endpoint. The endpoint just expects the user's message (and potentially additional inputs if you have set any using the additional_inputs parameter), and will return the response, internally keeping track of the messages sent so far.

\n\n

\n\n

To use the endpoint, you should use either the Gradio Python Client or the Gradio JS client.

\n\n
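
For instance, a minimal sketch with the Python client (the Space name below is a placeholder for wherever your chatbot is hosted):

\n\n
from gradio_client import Client\n\nclient = Client(\"your-username/your-chatbot-space\")  # placeholder Space id\nresult = client.predict(\"Hello!\", api_name=\"/chat\")\nprint(result)\n
\n\n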

A langchain example

\n\n

Now, let's actually use the gr.ChatInterface with some real large language models. We'll start by using langchain on top of openai to build a general-purpose chatbot application in about 20 lines of code. You'll need to have an OpenAI key for this example (keep reading for the free, open-source equivalent!)

\n\n
import os\n\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.schema import AIMessage, HumanMessage\nimport openai\nimport gradio as gr\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-...\"  # Replace with your key\n\nllm = ChatOpenAI(temperature=1.0, model='gpt-3.5-turbo-0613')\n\ndef predict(message, history):\n    history_langchain_format = []\n    for human, ai in history:\n        history_langchain_format.append(HumanMessage(content=human))\n        history_langchain_format.append(AIMessage(content=ai))\n    history_langchain_format.append(HumanMessage(content=message))\n    gpt_response = llm(history_langchain_format)\n    return gpt_response.content\n\ngr.ChatInterface(predict).launch()\n
\n\n

A streaming example using openai

\n\n

Of course, we could also use the openai library directly. Here's a similar example, but this time with streaming results as well:

\n\n
import openai\nimport gradio as gr\n\nopenai.api_key = \"sk-...\"  # Replace with your key\n\ndef predict(message, history):\n    history_openai_format = []\n    for human, assistant in history:\n        history_openai_format.append({\"role\": \"user\", \"content\": human })\n        history_openai_format.append({\"role\": \"assistant\", \"content\":assistant})\n    history_openai_format.append({\"role\": \"user\", \"content\": message})\n\n    response = openai.ChatCompletion.create(\n        model='gpt-3.5-turbo',\n        messages= history_openai_format,         \n        temperature=1.0,\n        stream=True\n    )\n\n    partial_message = \"\"\n    for chunk in response:\n        if len(chunk['choices'][0]['delta']) != 0:\n            partial_message = partial_message + chunk['choices'][0]['delta']['content']\n            yield partial_message \n\ngr.ChatInterface(predict).queue().launch() \n
\n\n

Example using a local, open-source LLM with Hugging Face

\n\n

Of course, in many cases you want to run a chatbot locally. Here's the equivalent example using Together's RedPajama model from Hugging Face (this requires you to have a GPU with CUDA).

\n\n
import gradio as gr\nimport torch\nfrom transformers import AutoModelForCausalLM, AutoTokenizer, StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer\nfrom threading import Thread\n\ntokenizer = AutoTokenizer.from_pretrained(\"togethercomputer/RedPajama-INCITE-Chat-3B-v1\")\nmodel = AutoModelForCausalLM.from_pretrained(\"togethercomputer/RedPajama-INCITE-Chat-3B-v1\", torch_dtype=torch.float16)\nmodel = model.to('cuda:0')\n\nclass StopOnTokens(StoppingCriteria):\n    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:\n        stop_ids = [29, 0]\n        for stop_id in stop_ids:\n            if input_ids[0][-1] == stop_id:\n                return True\n        return False\n\ndef predict(message, history): \n\n    history_transformer_format = history + [[message, \"\"]]\n    stop = StopOnTokens()\n\n    messages = \"\".join([\"\".join([\"\\n:\"+item[0], \"\\n:\"+item[1]])  #curr_system_message + \n                for item in history_transformer_format])\n\n    model_inputs = tokenizer([messages], return_tensors=\"pt\").to(\"cuda\")\n    streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)\n    generate_kwargs = dict(\n        model_inputs,\n        streamer=streamer,\n        max_new_tokens=1024,\n        do_sample=True,\n        top_p=0.95,\n        top_k=1000,\n        temperature=1.0,\n        num_beams=1,\n        stopping_criteria=StoppingCriteriaList([stop])\n        )\n    t = Thread(target=model.generate, kwargs=generate_kwargs)\n    t.start()\n\n    partial_message  = \"\"\n    for new_token in streamer:\n        if new_token != '<':\n            partial_message += new_token\n            yield partial_message \n\n\ngr.ChatInterface(predict).queue().launch()\n
\n\n

With those examples, you should be all set to create your own Gradio Chatbot demos soon! For building even more custom Chatbot applications, check out a dedicated guide using the low-level gr.Blocks() API.

\n", "tags": ["NLP", "TEXT", "CHAT"], "spaces": [], "url": "/guides/creating-a-chatbot-fast/", "contributor": null}, {"name": "creating-a-custom-chatbot-with-blocks", "category": "chatbots", "pretty_category": "Chatbots", "guide_index": 2, "absolute_index": 14, "pretty_name": "Creating A Custom Chatbot With Blocks", "content": "# How to Create a Custom Chatbot with Gradio Blocks\n\n\n\n\n## Introduction\n\n**Important Note**: if you are getting started, we recommend using the `gr.ChatInterface` to create chatbots -- its a high-level abstraction that makes it possible to create beautiful chatbot applications fast, often with a single line of code. [Read more about it here](/guides/creating-a-chatbot-fast).\n\nThis tutorial will show how to make chatbot UIs from scratch with Gradio's low-level Blocks API. This will give you full control over your Chatbot UI. You'll start by first creating a a simple chatbot to display text, a second one to stream text responses, and finally a chatbot that can handle media files as well. The chatbot interface that we create will look something like this:\n\n\n\n**Prerequisite**: We'll be using the `gradio.Blocks` class to build our Chatbot demo.\nYou can [read the Guide to Blocks first](https://gradio.app/quickstart/#blocks-more-flexibility-and-control) if you are not already familiar with it. Also please make sure you are using the **latest version** version of Gradio: `pip install --upgrade gradio`. \n\n## A Simple Chatbot Demo\n\nLet's start with recreating the simple demo above. As you may have noticed, our bot simply randomly responds \"How are you?\", \"I love you\", or \"I'm very hungry\" to any input. Here's the code to create this with Gradio:\n\n```python\nimport gradio as gr\nimport random\nimport time\n\nwith gr.Blocks() as demo:\n chatbot = gr.Chatbot()\n msg = gr.Textbox()\n clear = gr.ClearButton([msg, chatbot])\n\n def respond(message, chat_history):\n bot_message = random.choice([\"How are you?\", \"I love you\", \"I'm very hungry\"])\n chat_history.append((message, bot_message))\n time.sleep(2)\n return \"\", chat_history\n\n msg.submit(respond, [msg, chatbot], [msg, chatbot])\n\ndemo.launch()\n\n```\n\nThere are three Gradio components here:\n\n* A `Chatbot`, whose value stores the entire history of the conversation, as a list of response pairs between the user and bot.\n* A `Textbox` where the user can type their message, and then hit enter/submit to trigger the chatbot response\n* A `ClearButton` button to clear the Textbox and entire Chatbot history\n\nWe have a single function, `respond()`, which takes in the entire history of the chatbot, appends a random message, waits 1 second, and then returns the updated chat history. The `respond()` function also clears the textbox when it returns. \n\nOf course, in practice, you would replace `respond()` with your own more complex function, which might call a pretrained model or an API, to generate a response.\n\n\n\n\n## Add Streaming to your Chatbot\n\nThere are several ways we can improve the user experience of the chatbot above. First, we can stream responses so the user doesn't have to wait as long for a message to be generated. Second, we can have the user message appear immediately in the chat history, while the chatbot's response is being generated. 
Here's the code to achieve that: \n\n```python\nimport gradio as gr\nimport random\nimport time\n\nwith gr.Blocks() as demo:\n chatbot = gr.Chatbot()\n msg = gr.Textbox()\n clear = gr.Button(\"Clear\")\n\n def user(user_message, history):\n return \"\", history + [[user_message, None]]\n\n def bot(history):\n bot_message = random.choice([\"How are you?\", \"I love you\", \"I'm very hungry\"])\n history[-1][1] = \"\"\n for character in bot_message:\n history[-1][1] += character\n time.sleep(0.05)\n yield history\n\n msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(\n bot, chatbot, chatbot\n )\n clear.click(lambda: None, None, chatbot, queue=False)\n \ndemo.queue()\ndemo.launch()\n\n```\n\n\nYou'll notice that when a user submits their message, we now *chain* three event events with `.then()`:\n\n1. The first method `user()` updates the chatbot with the user message and clears the input field. This method also makes the input field non interactive so that the user can't send another message while the chatbot is responding. Because we want this to happen instantly, we set `queue=False`, which would skip any queue had it been enabled. The chatbot's history is appended with `(user_message, None)`, the `None` signifying that the bot has not responded.\n\n2. The second method, `bot()` updates the chatbot history with the bot's response. Instead of creating a new message, we just replace the previously-created `None` message with the bot's response. Finally, we construct the message character by character and `yield` the intermediate outputs as they are being constructed. Gradio automatically turns any function with the `yield` keyword [into a streaming output interface](/guides/key-features/#iterative-outputs).\n\n3. The third method makes the input field interactive again so that users can send another message to the bot.\n\nOf course, in practice, you would replace `bot()` with your own more complex function, which might call a pretrained model or an API, to generate a response.\n\nFinally, we enable queuing by running `demo.queue()`, which is required for streaming intermediate outputs. You can try the improved chatbot by scrolling to the demo at the top of this page.\n\n## Adding Markdown, Images, Audio, or Videos\n\nThe `gr.Chatbot` component supports a subset of markdown including bold, italics, and code. For example, we could write a function that responds to a user's message, with a bold **That's cool!**, like this:\n\n```py\ndef bot(history):\n response = \"**That's cool!**\"\n history[-1][1] = response\n return history\n```\n\nIn addition, it can handle media files, such as images, audio, and video. To pass in a media file, we must pass in the file as a tuple of two strings, like this: `(filepath, alt_text)`. The `alt_text` is optional, so you can also just pass in a tuple with a single element `(filepath,)`, like this:\n\n```python\ndef add_file(history, file):\n history = history + [((file.name,), None)]\n return history\n```\n\nPutting this together, we can create a *multimodal* chatbot with a textbox for a user to submit text and an file upload button to submit images / audio / video files. The rest of the code looks pretty much the same as before:\n\n```python\nimport gradio as gr\nimport random\nimport time\n\n# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). 
Plus shows support for streaming text.\n\ndef add_text(history, text):\n history = history + [(text, None)]\n return history, gr.update(value=\"\", interactive=False)\n\n\ndef add_file(history, file):\n history = history + [((file.name,), None)]\n return history\n\n\ndef bot(history):\n response = \"**That's cool!**\"\n history[-1][1] = \"\"\n for character in response:\n history[-1][1] += character\n time.sleep(0.05)\n yield history\n\n\nwith gr.Blocks() as demo:\n chatbot = gr.Chatbot([], elem_id=\"chatbot\").style(height=750)\n\n with gr.Row():\n with gr.Column(scale=0.85):\n txt = gr.Textbox(\n show_label=False,\n placeholder=\"Enter text and press enter, or upload an image\",\n ).style(container=False)\n with gr.Column(scale=0.15, min_width=0):\n btn = gr.UploadButton(\"\ud83d\udcc1\", file_types=[\"image\", \"video\", \"audio\"])\n\n txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(\n bot, chatbot, chatbot\n )\n txt_msg.then(lambda: gr.update(interactive=True), None, [txt], queue=False)\n file_msg = btn.upload(add_file, [chatbot, btn], [chatbot], queue=False).then(\n bot, chatbot, chatbot\n )\n\ndemo.queue()\ndemo.launch()\n\n```\n\n\nAnd you're done! That's all the code you need to build an interface for your chatbot model. Finally, we'll end our Guide with some links to Chatbots that are running on Spaces so that you can get an idea of what else is possible:\n\n* [project-baize/Baize-7B](https://huggingface.co/spaces/project-baize/Baize-7B): A stylized chatbot that allows you to stop generation as well as regenerate responses. \n* [MAGAer13/mPLUG-Owl](https://huggingface.co/spaces/MAGAer13/mPLUG-Owl): A multimodal chatbot that allows you to upvote and downvote responses. \n", "html": "

How to Create a Custom Chatbot with Gradio Blocks

\n\n

Introduction

\n\n

Important Note: if you are getting started, we recommend using the gr.ChatInterface to create chatbots -- it's a high-level abstraction that makes it possible to create beautiful chatbot applications fast, often with a single line of code. Read more about it here.

\n\n

This tutorial will show how to make chatbot UIs from scratch with Gradio's low-level Blocks API. This will give you full control over your Chatbot UI. You'll start by creating a simple chatbot to display text, a second one to stream text responses, and finally a chatbot that can handle media files as well. The chatbot interface that we create will look something like this:

\n\n

\n\n

Prerequisite: We'll be using the gradio.Blocks class to build our Chatbot demo.\nYou can read the Guide to Blocks first if you are not already familiar with it. Also please make sure you are using the latest version of Gradio: pip install --upgrade gradio.

\n\n

A Simple Chatbot Demo

\n\n

Let's start with recreating the simple demo above. As you may have noticed, our bot simply randomly responds \"How are you?\", \"I love you\", or \"I'm very hungry\" to any input. Here's the code to create this with Gradio:

\n\n
import gradio as gr\nimport random\nimport time\n\nwith gr.Blocks() as demo:\n    chatbot = gr.Chatbot()\n    msg = gr.Textbox()\n    clear = gr.ClearButton([msg, chatbot])\n\n    def respond(message, chat_history):\n        bot_message = random.choice([\"How are you?\", \"I love you\", \"I'm very hungry\"])\n        chat_history.append((message, bot_message))\n        time.sleep(2)\n        return \"\", chat_history\n\n    msg.submit(respond, [msg, chatbot], [msg, chatbot])\n\ndemo.launch()\n\n
\n\n

There are three Gradio components here:

\n\n
    \n
  • A Chatbot, whose value stores the entire history of the conversation, as a list of response pairs between the user and bot.
  • \n
  • A Textbox where the user can type their message, and then hit enter/submit to trigger the chatbot response
  • \n
  • A ClearButton button to clear the Textbox and entire Chatbot history
  • \n
\n\n

We have a single function, respond(), which takes in the entire history of the chatbot, appends a random message, waits 2 seconds, and then returns the updated chat history. The respond() function also clears the textbox when it returns.

\n\n

Of course, in practice, you would replace respond() with your own more complex function, which might call a pretrained model or an API, to generate a response.

\n\n

\n\n

Add Streaming to your Chatbot

\n\n

There are several ways we can improve the user experience of the chatbot above. First, we can stream responses so the user doesn't have to wait as long for a message to be generated. Second, we can have the user message appear immediately in the chat history, while the chatbot's response is being generated. Here's the code to achieve that:

\n\n
import gradio as gr\nimport random\nimport time\n\nwith gr.Blocks() as demo:\n    chatbot = gr.Chatbot()\n    msg = gr.Textbox()\n    clear = gr.Button(\"Clear\")\n\n    def user(user_message, history):\n        return \"\", history + [[user_message, None]]\n\n    def bot(history):\n        bot_message = random.choice([\"How are you?\", \"I love you\", \"I'm very hungry\"])\n        history[-1][1] = \"\"\n        for character in bot_message:\n            history[-1][1] += character\n            time.sleep(0.05)\n            yield history\n\n    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(\n        bot, chatbot, chatbot\n    )\n    clear.click(lambda: None, None, chatbot, queue=False)\n\ndemo.queue()\ndemo.launch()\n\n
\n\n

You'll notice that when a user submits their message, we now chain three events with .then():

\n\n
    \n
  1. The first method user() updates the chatbot with the user message and clears the input field. This method also makes the input field non interactive so that the user can't send another message while the chatbot is responding. Because we want this to happen instantly, we set queue=False, which would skip any queue had it been enabled. The chatbot's history is appended with (user_message, None), the None signifying that the bot has not responded.

  2. \n
  3. The second method, bot() updates the chatbot history with the bot's response. Instead of creating a new message, we just replace the previously-created None message with the bot's response. Finally, we construct the message character by character and yield the intermediate outputs as they are being constructed. Gradio automatically turns any function with the yield keyword into a streaming output interface.

  4. \n
  5. The third method makes the input field interactive again so that users can send another message to the bot.

  6. \n
\n\n

Of course, in practice, you would replace bot() with your own more complex function, which might call a pretrained model or an API, to generate a response.

\n\n

Finally, we enable queuing by running demo.queue(), which is required for streaming intermediate outputs. You can try the improved chatbot by scrolling to the demo at the top of this page.

\n\n

Adding Markdown, Images, Audio, or Videos

\n\n

The gr.Chatbot component supports a subset of markdown including bold, italics, and code. For example, we could write a function that responds to a user's message, with a bold That's cool!, like this:

\n\n
def bot(history):\n    response = \"**That's cool!**\"\n    history[-1][1] = response\n    return history\n
\n\n

In addition, it can handle media files, such as images, audio, and video. To pass in a media file, we must pass in the file as a tuple of two strings, like this: (filepath, alt_text). The alt_text is optional, so you can also just pass in a tuple with a single element (filepath,), like this:

\n\n
def add_file(history, file):\n    history = history + [((file.name,), None)]\n    return history\n
\n\n

Putting this together, we can create a multimodal chatbot with a textbox for a user to submit text and a file upload button to submit images / audio / video files. The rest of the code looks pretty much the same as before:

\n\n
import gradio as gr\nimport random\nimport time\n\n# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.\n\ndef add_text(history, text):\n    history = history + [(text, None)]\n    return history, gr.update(value=\"\", interactive=False)\n\n\ndef add_file(history, file):\n    history = history + [((file.name,), None)]\n    return history\n\n\ndef bot(history):\n    response = \"**That's cool!**\"\n    history[-1][1] = \"\"\n    for character in response:\n        history[-1][1] += character\n        time.sleep(0.05)\n        yield history\n\n\nwith gr.Blocks() as demo:\n    chatbot = gr.Chatbot([], elem_id=\"chatbot\").style(height=750)\n\n    with gr.Row():\n        with gr.Column(scale=0.85):\n            txt = gr.Textbox(\n                show_label=False,\n                placeholder=\"Enter text and press enter, or upload an image\",\n            ).style(container=False)\n        with gr.Column(scale=0.15, min_width=0):\n            btn = gr.UploadButton(\"\ud83d\udcc1\", file_types=[\"image\", \"video\", \"audio\"])\n\n    txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(\n        bot, chatbot, chatbot\n    )\n    txt_msg.then(lambda: gr.update(interactive=True), None, [txt], queue=False)\n    file_msg = btn.upload(add_file, [chatbot, btn], [chatbot], queue=False).then(\n        bot, chatbot, chatbot\n    )\n\ndemo.queue()\ndemo.launch()\n\n
\n\n

\n\n

And you're done! That's all the code you need to build an interface for your chatbot model. Finally, we'll end our Guide with some links to Chatbots that are running on Spaces so that you can get an idea of what else is possible:

\n\n
    \n
  • project-baize/Baize-7B: A stylized chatbot that allows you to stop generation as well as regenerate responses.
  • \n
  • MAGAer13/mPLUG-Owl: A multimodal chatbot that allows you to upvote and downvote responses.
  • \n
\n", "tags": ["NLP", "TEXT", "CHAT"], "spaces": ["https://huggingface.co/spaces/gradio/chatbot_streaming", "https://huggingface.co/spaces/project-baize/Baize-7B", ""], "url": "/guides/creating-a-custom-chatbot-with-blocks/", "contributor": null}, {"name": "creating-a-discord-bot-from-a-gradio-app", "category": "chatbots", "pretty_category": "Chatbots", "guide_index": 3, "absolute_index": 15, "pretty_name": "Creating A Discord Bot From A Gradio App", "content": "# \ud83d\ude80 Creating Discord Bots from Gradio Apps \ud83d\ude80\n\n\n\nWe're excited to announce that Gradio can now automatically create a discord bot from a deployed app! \ud83e\udd16 \n\nDiscord is a popular communication platform that allows users to chat and interact with each other in real-time. By turning your Gradio app into a Discord bot, you can bring cutting edge AI to your discord server and give your community a whole new way to interact.\n\n## \ud83d\udcbb How does it work? \ud83d\udcbb\n\nWith `gradio_client` version `0.3.0`, any gradio `ChatInterface` app on the internet can automatically be deployed as a discord bot via the `deploy_discord` method of the `Client` class.\n\nTechnically, any gradio app that exposes an api route that takes in a single string and outputs a single string can be deployed to discord. In this guide, we will focus on `gr.ChatInterface` as those apps naturally lend themselves to discord's chat functionality.\n\n## \ud83d\udee0\ufe0f Requirements \ud83d\udee0\ufe0f\n\nMake sure you have the latest `gradio_client` and `gradio` versions installed.\n\n```bash\npip install gradio_client>=0.3.0 gradio>=3.38.0\n```\n\nAlso, make sure you have a [Hugging Face account](https://huggingface.co/) and a [write access token](https://huggingface.co/docs/hub/security-tokens).\n\n\u26a0\ufe0f Tip \u26a0\ufe0f: Make sure you login to the Hugging Face Hub by running `huggingface-cli login`. This will let you skip passing your token in all subsequent commands in this guide.\n\n## \ud83c\udfc3\u200d\u2640\ufe0f Quickstart \ud83c\udfc3\u200d\u2640\ufe0f\n\n### Step 1: Implementing our chatbot\nLet's build a very simple Chatbot using `ChatInterface` that simply repeats the user message. Write the following code into an `app.py`\n\n```python\nimport gradio as gr\n\ndef slow_echo(message, history):\n return message\n\ndemo = gr.ChatInterface(slow_echo).queue().launch()\n```\n\n### Step 2: Deploying our App\nIn order to create a discord bot for our app, it must be accessible over the internet. In this guide, we will use the `gradio deploy` command to deploy our chatbot to Hugging Face spaces from the command line. Run the following command.\n\n```bash\ngradio deploy --title echo-chatbot --app-file app.py\n```\n\nThis command will ask you some questions, e.g. requested hardware, requirements, but the default values will suffice for this guide.\nNote the URL of the space that was created. Mine is https://huggingface.co/spaces/freddyaboulton/echo-chatbot\n\n### Step 3: Creating a Discord Bot\nTurning our space into a discord bot is also a one-liner thanks to the `gradio deploy-discord`. Run the following command:\n\n```bash\ngradio deploy-discord --src freddyaboulton/echo-chatbot\n```\n\n\u2757\ufe0f Advanced \u2757\ufe0f: If you already have a discord bot token you can pass it to the `deploy-discord` command. Don't worry, if you don't have one yet!\n\n```bash\ngradio deploy-discord --src freddyaboulton/echo-chatbot --discord-bot-token \n```\n\nNote the URL that gets printed out to the console. 
Mine is https://huggingface.co/spaces/freddyaboulton/echo-chatbot-gradio-discord-bot\n\n### Step 4: Getting a Discord Bot Token\nIf you didn't have a discord bot token for step 3, go to the URL that got printed in the console and follow the instructions there.\nOnce you obtain a token, run the command again but this time pass in the token: \n\n```bash\ngradio deploy-discord --src freddyaboulton/echo-chatbot --discord-bot-token \n```\n\n### Step 5: Add the bot to your server\nVisit the space of your discord bot. You should see \"Add this bot to your server by clicking this link:\" followed by a URL. Go to that URL and add the bot to your server!\n\n### Step 6: Use your bot!\nBy default the bot can be called by starting a message with `/chat`, e.g. `/chat `.\n\n\u26a0\ufe0f Tip \u26a0\ufe0f: If either of the deployed spaces goes to sleep, the bot will stop working. By default, spaces go to sleep after 48 hours of inactivity. You can upgrade the hardware of your space to prevent it from going to sleep. See this [guide](https://huggingface.co/docs/hub/spaces-gpus#using-gpu-spaces) for more information.\n\n\n\n\n### Using the `gradio_client.Client` Class\nYou can also create a discord bot from a deployed gradio app with python.\n\n```python\nimport gradio_client as grc\ngrc.Client(\"freddyaboulton/echo-chatbot\").deploy_discord()\n```\n\n## \ud83e\uddbe Using State of The Art LLMs \ud83e\uddbe\n\nWe have created an organization on Hugging Face called [gradio-discord-bots](https://huggingface.co/gradio-discord-bots) containing several template spaces that explain how to deploy state of the art LLMs powered by gradio as discord bots.\n\nThe easiest way to get started is by deploying Meta's Llama 2 LLM with 70 billion parameter. Simply go to this [space](https://huggingface.co/spaces/gradio-discord-bots/Llama-2-70b-chat-hf) and follow the instructions. \n\nThe deployment can be done in one line! \ud83e\udd2f\n\n```python\nimport gradio_client as grc\ngrc.Client(\"ysharma/Explore_llamav2_with_TGI\").deploy_discord(to_id=\"llama2-70b-discord-bot\")\n```\n\n## \ud83e\udd9c Additional LLMs \ud83e\udd9c\n\nIn addion to Meta's 70 billion Llama 2 model, we have prepared template spaces for the following LLMs and deployment options:\n\n* [gpt-3.5-turbo](https://huggingface.co/spaces/gradio-discord-bots/gpt-35-turbo), powered by openai. Required OpenAI key.\n* [falcon-7b-instruct](https://huggingface.co/spaces/gradio-discord-bots/falcon-7b-instruct) powered by Hugging Face Inference Endpoints.\n* [Llama-2-13b-chat-hf](https://huggingface.co/spaces/gradio-discord-bots/Llama-2-13b-chat-hf) powered by Hugging Face Inference Endpoints.\n* [Llama-2-13b-chat-hf](https://huggingface.co/spaces/gradio-discord-bots/llama-2-13b-chat-transformers) powered by Hugging Face transformers.\n\nTo deploy any of these models to discord, simply follow the instructions in the linked space for that model.\n\n## Deploying non-chat gradio apps to discord\n\nAs mentioned above, you don't need a `gr.ChatInterface` if you want to deploy your gradio app to discord. All that's needed is an api route that takes in a single string and outputs a single string. \n\nThe following code will deploy a space that translates english to german as a discord bot.\n\n```python\nimport gradio_client as grc\nclient = grc.Client(\"freddyaboulton/english-to-german\")\nclient.deploy_discord(api_names=['german'])\n```\n\n## Conclusion\n\nThat's it for this guide! We're really excited about this feature. 
Tag [@Gradio](https://twitter.com/Gradio) on twitter and show us how your discord community interacts with your discord bots. ", "html": "

\ud83d\ude80 Creating Discord Bots from Gradio Apps \ud83d\ude80

\n\n

We're excited to announce that Gradio can now automatically create a discord bot from a deployed app! \ud83e\udd16

\n\n

Discord is a popular communication platform that allows users to chat and interact with each other in real-time. By turning your Gradio app into a Discord bot, you can bring cutting edge AI to your discord server and give your community a whole new way to interact.

\n\n

\ud83d\udcbb How does it work? \ud83d\udcbb

\n\n

With gradio_client version 0.3.0, any gradio ChatInterface app on the internet can automatically be deployed as a discord bot via the deploy_discord method of the Client class.

\n\n

Technically, any gradio app that exposes an api route that takes in a single string and outputs a single string can be deployed to discord. In this guide, we will focus on gr.ChatInterface as those apps naturally lend themselves to discord's chat functionality.

\n\n

\ud83d\udee0\ufe0f Requirements \ud83d\udee0\ufe0f

\n\n

Make sure you have the latest gradio_client and gradio versions installed.

\n\n
pip install gradio_client>=0.3.0 gradio>=3.38.0\n
\n\n

Also, make sure you have a Hugging Face account and a write access token.

\n\n

\u26a0\ufe0f Tip \u26a0\ufe0f: Make sure you log in to the Hugging Face Hub by running huggingface-cli login. This will let you skip passing your token in all subsequent commands in this guide.

\n\n
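If you prefer to configure this from Python instead of the CLI, a minimal sketch (assuming a recent version of the huggingface_hub library) would be:

\n\n
import huggingface_hub

# Programmatic alternative to `huggingface-cli login`: prompts for your write access
# token and stores it locally so later commands can pick it up automatically.
huggingface_hub.login()
\n\n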

\ud83c\udfc3\u200d\u2640\ufe0f Quickstart \ud83c\udfc3\u200d\u2640\ufe0f

\n\n

Step 1: Implementing our chatbot

\n\n

Let's build a very simple chatbot using ChatInterface that simply repeats the user message. Write the following code into an app.py file.

\n\n
import gradio as gr\n\ndef slow_echo(message, history):\n    return message\n\ndemo = gr.ChatInterface(slow_echo).queue().launch()\n
\n\n

Step 2: Deploying our App

\n\n

In order to create a discord bot for our app, it must be accessible over the internet. In this guide, we will use the gradio deploy command to deploy our chatbot to Hugging Face spaces from the command line. Run the following command.

\n\n
gradio deploy --title echo-chatbot --app-file app.py\n
\n\n

This command will ask you some questions (e.g. requested hardware and requirements), but the default values will suffice for this guide.\nNote the URL of the space that was created. Mine is https://huggingface.co/spaces/freddyaboulton/echo-chatbot

\n\n

Step 3: Creating a Discord Bot

\n\n

Turning our space into a discord bot is also a one-liner thanks to the gradio deploy-discord command. Run the following command:

\n\n
gradio deploy-discord --src freddyaboulton/echo-chatbot\n
\n\n

\u2757\ufe0f Advanced \u2757\ufe0f: If you already have a discord bot token, you can pass it to the deploy-discord command. Don't worry if you don't have one yet!

\n\n
gradio deploy-discord --src freddyaboulton/echo-chatbot --discord-bot-token <token>\n
\n\n

Note the URL that gets printed out to the console. Mine is https://huggingface.co/spaces/freddyaboulton/echo-chatbot-gradio-discord-bot

\n\n

Step 4: Getting a Discord Bot Token

\n\n

If you didn't have a discord bot token for step 3, go to the URL that got printed in the console and follow the instructions there.\nOnce you obtain a token, run the command again but this time pass in the token:

\n\n
gradio deploy-discord --src freddyaboulton/echo-chatbot --discord-bot-token <token>\n
\n\n

Step 5: Add the bot to your server

\n\n

Visit the space of your discord bot. You should see \"Add this bot to your server by clicking this link:\" followed by a URL. Go to that URL and add the bot to your server!

\n\n

Step 6: Use your bot!

\n\n

By default the bot can be called by starting a message with /chat, e.g. /chat <your prompt here>.

\n\n

\u26a0\ufe0f Tip \u26a0\ufe0f: If either of the deployed spaces goes to sleep, the bot will stop working. By default, spaces go to sleep after 48 hours of inactivity. You can upgrade the hardware of your space to prevent it from going to sleep. See this guide for more information.

\n\n

\n\n

Using the gradio_client.Client Class

\n\n

You can also create a discord bot from a deployed gradio app with python.

\n\n
import gradio_client as grc\ngrc.Client(\"freddyaboulton/echo-chatbot\").deploy_discord()\n
\n\n

\ud83e\uddbe Using State of The Art LLMs \ud83e\uddbe

\n\n

We have created an organization on Hugging Face called gradio-discord-bots containing several template spaces that explain how to deploy state of the art LLMs powered by gradio as discord bots.

\n\n

The easiest way to get started is by deploying Meta's Llama 2 LLM with 70 billion parameters. Simply go to this space and follow the instructions.

\n\n

The deployment can be done in one line! \ud83e\udd2f

\n\n
import gradio_client as grc\ngrc.Client(\"ysharma/Explore_llamav2_with_TGI\").deploy_discord(to_id=\"llama2-70b-discord-bot\")\n
\n\n

\ud83e\udd9c Additional LLMs \ud83e\udd9c

\n\n

In addition to Meta's 70 billion parameter Llama 2 model, we have prepared template spaces for the following LLMs and deployment options:

\n\n
  • gpt-3.5-turbo, powered by OpenAI. Requires an OpenAI key.
  • falcon-7b-instruct, powered by Hugging Face Inference Endpoints.
  • Llama-2-13b-chat-hf, powered by Hugging Face Inference Endpoints.
  • Llama-2-13b-chat-hf, powered by Hugging Face transformers.
\n\n

To deploy any of these models to discord, simply follow the instructions in the linked space for that model.

\n\n

Deploying non-chat gradio apps to discord

\n\n

As mentioned above, you don't need a gr.ChatInterface if you want to deploy your gradio app to discord. All that's needed is an api route that takes in a single string and outputs a single string.

\n\n

The following code will deploy a space that translates English to German as a discord bot.

\n\n
import gradio_client as grc\nclient = grc.Client(\"freddyaboulton/english-to-german\")\nclient.deploy_discord(api_names=['german'])\n
\n\n

Conclusion

\n\n

That's it for this guide! We're really excited about this feature. Tag @Gradio on Twitter and show us how your discord community interacts with your discord bots.

\n", "tags": ["NLP", "TEXT", "CHAT"], "spaces": [], "url": "/guides/creating-a-discord-bot-from-a-gradio-app/", "contributor": null}]}, {"category": "Integrating Other Frameworks", "guides": [{"name": "using-hugging-face-integrations", "category": "integrating-other-frameworks", "pretty_category": "Integrating Other Frameworks", "guide_index": 1, "absolute_index": 16, "pretty_name": "Using Hugging Face Integrations", "content": "# Using Hugging Face Integrations\n\n\n\n\n\n\n## Introduction\n\nThe Hugging Face Hub is a central platform that has over 190,000 [models](https://huggingface.co/models), 32,000 [datasets](https://huggingface.co/datasets) and 40,000 [demos](https://huggingface.co/spaces), also known as Spaces. Although Hugging Face is famous for its \ud83e\udd17 transformers and diffusers libraries, the Hub also supports dozens of ML libraries, such as PyTorch, TensorFlow, spaCy, and many others across a variety of domains, from computer vision to reinforcement learning.\n\nGradio has multiple features that make it extremely easy to leverage existing models and Spaces on the Hub. This guide walks through these features.\n\n## Using regular inference with `pipeline`\n\nFirst, let's build a simple interface that translates text from English to Spanish. Between the over a thousand models shared by the University of Helsinki, there is an [existing model](https://huggingface.co/Helsinki-NLP/opus-mt-en-es), `opus-mt-en-es`, that does precisely this!\n\nThe \ud83e\udd17 transformers library has a very easy-to-use abstraction, [`pipeline()`](https://huggingface.co/docs/transformers/v4.16.2/en/main_classes/pipelines#transformers.pipeline) that handles most of the complex code to offer a simple API for common tasks. By specifying the task and an (optional) model, you can use an existing model with few lines:\n\n```python\nimport gradio as gr\n\nfrom transformers import pipeline\n\npipe = pipeline(\"translation\", model=\"Helsinki-NLP/opus-mt-en-es\")\n\ndef predict(text):\n return pipe(text)[0][\"translation_text\"]\n \ndemo = gr.Interface(\n fn=predict, \n inputs='text',\n outputs='text',\n)\n\ndemo.launch()\n```\n\nBut `gradio` actually makes it even easier to convert a `pipeline` to a demo, simply by using the `gradio.Interface.from_pipeline` methods, which skips the need to specify the input and output components:\n\n```python\nfrom transformers import pipeline\nimport gradio as gr\n\npipe = pipeline(\"translation\", model=\"Helsinki-NLP/opus-mt-en-es\")\n\ndemo = gr.Interface.from_pipeline(pipe)\ndemo.launch()\n```\n\nThe previous code produces the following interface, which you can try right here in your browser: \n\n\n\n\n\n## Using Hugging Face Inference API\n\nHugging Face has a free service called the [Inference API](https://huggingface.co/inference-api), which allows you to send HTTP requests to models in the Hub. For transformers or diffusers-based models, the API can be 2 to 10 times faster than running the inference yourself. The API is free (rate limited), and you can switch to dedicated [Inference Endpoints](https://huggingface.co/pricing) when you want to use it in production.\n\nLet's try the same demo as above but using the Inference API instead of loading the model yourself. Given a Hugging Face model supported in the Inference API, Gradio can automatically infer the expected input and output and make the underlying server calls, so you don't have to worry about defining the prediction function. 
Here is what the code would look like!\n\n```python\nimport gradio as gr\n\ndemo = gr.load(\"Helsinki-NLP/opus-mt-en-es\", src=\"models\")\n\ndemo.launch()\n```\n\nNotice that we just put specify the model name and state that the `src` should be `models` (Hugging Face's Model Hub). There is no need to install any dependencies (except `gradio`) since you are not loading the model on your computer.\n\nYou might notice that the first inference takes about 20 seconds. This happens since the Inference API is loading the model in the server. You get some benefits afterward:\n\n* The inference will be much faster.\n* The server caches your requests.\n* You get built-in automatic scaling.\n\n## Hosting your Gradio demos\n\n[Hugging Face Spaces](https://hf.co/spaces) allows anyone to host their Gradio demos freely, and uploading your Gradio demos take a couple of minutes. You can head to [hf.co/new-space](https://huggingface.co/new-space), select the Gradio SDK, create an `app.py` file, and voila! You have a demo you can share with anyone else. To learn more, read [this guide how to host on Hugging Face Spaces using the website](https://huggingface.co/blog/gradio-spaces).\n\n\nAlternatively, you can create a Space programmatically, making use of the [huggingface_hub client library](https://huggingface.co/docs/huggingface_hub/index) library. Here's an example:\n\n```python\nfrom huggingface_hub import (\n create_repo,\n get_full_repo_name,\n upload_file,\n)\ncreate_repo(name=target_space_name, token=hf_token, repo_type=\"space\", space_sdk=\"gradio\")\nrepo_name = get_full_repo_name(model_id=target_space_name, token=hf_token)\nfile_url = upload_file(\n path_or_fileobj=\"file.txt\",\n path_in_repo=\"app.py\",\n repo_id=repo_name,\n repo_type=\"space\",\n token=hf_token,\n)\n```\nHere, `create_repo` creates a gradio repo with the target name under a specific account using that account's Write Token. `repo_name` gets the full repo name of the related repo. Finally `upload_file` uploads a file inside the repo with the name `app.py`.\n\n\n\n## Embedding your Space demo on other websites\n\nThroughout this guide, you've seen many embedded Gradio demos. You can also do this on own website! The first step is to create a Hugging Face Space with the demo you want to showcase. Then, [follow the steps here to embed the Space on your website](/guides/sharing-your-app/#embedding-hosted-spaces).\n\n\n## Loading demos from Spaces\n\nYou can also use and remix existing Gradio demos on Hugging Face Spaces. For example, you could take two existing Gradio demos and put them as separate tabs and create a new demo. You can run this new demo locally, or upload it to Spaces, allowing endless possibilities to remix and create new demos!\n\nHere's an example that does exactly that:\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n with gr.Tab(\"Translate to Spanish\"):\n gr.load(\"gradio/helsinki_translation_en_es\", src=\"spaces\")\n with gr.Tab(\"Translate to French\"):\n gr.load(\"abidlabs/en2fr\", src=\"spaces\")\n\ndemo.launch()\n```\n\nNotice that we use `gr.load()`, the same method we used to load models using the Inference API. However, here we specify that the `src` is `spaces` (Hugging Face Spaces).\n\n## Recap\n\nThat's it! Let's recap the various ways Gradio and Hugging Face work together:\n\n1. You can convert a `transformers` pipeline into a Gradio demo using `from_pipeline()`\n2. You can build a demo around the Inference API without having to load the model easily using `gr.load()`\n3. 
You host your Gradio demo on Hugging Face Spaces, either using the GUI or entirely in Python.\n4. You can embed Gradio demos that are hosted on Hugging Face Spaces onto your own website.\n5. You can load demos from Hugging Face Spaces to remix and create new Gradio demos using `gr.load()`.\n\n\n\ud83e\udd17\n", "html": "

Using Hugging Face Integrations

\n\n

Introduction

\n\n

The Hugging Face Hub is a central platform that has over 190,000 models, 32,000 datasets and 40,000 demos, also known as Spaces. Although Hugging Face is famous for its \ud83e\udd17 transformers and diffusers libraries, the Hub also supports dozens of ML libraries, such as PyTorch, TensorFlow, spaCy, and many others across a variety of domains, from computer vision to reinforcement learning.

\n\n

Gradio has multiple features that make it extremely easy to leverage existing models and Spaces on the Hub. This guide walks through these features.

\n\n

Using regular inference with pipeline

\n\n

First, let's build a simple interface that translates text from English to Spanish. Among the more than one thousand models shared by the University of Helsinki, there is an existing model, opus-mt-en-es, that does precisely this!

\n\n

The \ud83e\udd17 transformers library has a very easy-to-use abstraction, pipeline(), that handles most of the complex code to offer a simple API for common tasks. By specifying the task and an (optional) model, you can use an existing model in just a few lines:

\n\n
import gradio as gr\n\nfrom transformers import pipeline\n\npipe = pipeline(\"translation\", model=\"Helsinki-NLP/opus-mt-en-es\")\n\ndef predict(text):\n  return pipe(text)[0][\"translation_text\"]\n\ndemo = gr.Interface(\n  fn=predict, \n  inputs='text',\n  outputs='text',\n)\n\ndemo.launch()\n
\n\n

But gradio actually makes it even easier to convert a pipeline to a demo, simply by using the gradio.Interface.from_pipeline method, which skips the need to specify the input and output components:

\n\n
from transformers import pipeline\nimport gradio as gr\n\npipe = pipeline(\"translation\", model=\"Helsinki-NLP/opus-mt-en-es\")\n\ndemo = gr.Interface.from_pipeline(pipe)\ndemo.launch()\n
\n\n

The previous code produces the following interface, which you can try right here in your browser:

\n\n

\n\n

Using Hugging Face Inference API

\n\n

Hugging Face has a free service called the Inference API, which allows you to send HTTP requests to models in the Hub. For transformers or diffusers-based models, the API can be 2 to 10 times faster than running the inference yourself. The API is free (rate limited), and you can switch to dedicated Inference Endpoints when you want to use it in production.

\n\n
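For context, a raw call to the Inference API is just an HTTP request. Here is a rough sketch using the requests library (the token header below is a placeholder); Gradio makes the equivalent call for you, so you never need to write this yourself:

\n\n
import requests

# Hedged sketch of a direct Inference API call; gr.load() handles this for you.
API_URL = "https://api-inference.huggingface.co/models/Helsinki-NLP/opus-mt-en-es"
headers = {"Authorization": "Bearer <your-hf-token>"}  # placeholder token

response = requests.post(API_URL, headers=headers, json={"inputs": "Hello, how are you?"})
print(response.json())  # e.g. [{"translation_text": "..."}]
\n\n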

Let's try the same demo as above but using the Inference API instead of loading the model yourself. Given a Hugging Face model supported in the Inference API, Gradio can automatically infer the expected input and output and make the underlying server calls, so you don't have to worry about defining the prediction function. Here is what the code would look like!

\n\n
import gradio as gr\n\ndemo = gr.load(\"Helsinki-NLP/opus-mt-en-es\", src=\"models\")\n\ndemo.launch()\n
\n\n

Notice that we just specify the model name and state that the src should be models (Hugging Face's Model Hub). There is no need to install any dependencies (except gradio) since you are not loading the model on your computer.

\n\n

You might notice that the first inference takes about 20 seconds. This happens because the Inference API is loading the model on the server. You get some benefits afterward:

\n\n
    \n
  • The inference will be much faster.
  • The server caches your requests.
  • You get built-in automatic scaling.
\n\n

Hosting your Gradio demos

\n\n

Hugging Face Spaces allows anyone to host their Gradio demos freely, and uploading your Gradio demos takes only a couple of minutes. You can head to hf.co/new-space, select the Gradio SDK, create an app.py file, and voila! You have a demo you can share with anyone else. To learn more, read this guide on how to host on Hugging Face Spaces using the website.

\n\n

Alternatively, you can create a Space programmatically, making use of the huggingface_hub client library. Here's an example:

\n\n
from huggingface_hub import (\n    create_repo,\n    get_full_repo_name,\n    upload_file,\n)\ncreate_repo(name=target_space_name, token=hf_token, repo_type=\"space\", space_sdk=\"gradio\")\nrepo_name = get_full_repo_name(model_id=target_space_name, token=hf_token)\nfile_url = upload_file(\n    path_or_fileobj=\"file.txt\",\n    path_in_repo=\"app.py\",\n    repo_id=repo_name,\n    repo_type=\"space\",\n    token=hf_token,\n)\n
\n\n

Here, create_repo creates a gradio repo with the target name under a specific account using that account's Write Token. repo_name gets the full repo name of the related repo. Finally, upload_file uploads a file to the repo with the name app.py.

\n\n

Embedding your Space demo on other websites

\n\n

Throughout this guide, you've seen many embedded Gradio demos. You can also do this on your own website! The first step is to create a Hugging Face Space with the demo you want to showcase. Then, follow the steps here to embed the Space on your website.

\n\n

Loading demos from Spaces

\n\n

You can also use and remix existing Gradio demos on Hugging Face Spaces. For example, you could take two existing Gradio demos and put them as separate tabs and create a new demo. You can run this new demo locally, or upload it to Spaces, allowing endless possibilities to remix and create new demos!

\n\n

Here's an example that does exactly that:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n  with gr.Tab(\"Translate to Spanish\"):\n    gr.load(\"gradio/helsinki_translation_en_es\", src=\"spaces\")\n  with gr.Tab(\"Translate to French\"):\n    gr.load(\"abidlabs/en2fr\", src=\"spaces\")\n\ndemo.launch()\n
\n\n

Notice that we use gr.load(), the same method we used to load models using the Inference API. However, here we specify that the src is spaces (Hugging Face Spaces).

\n\n

Recap

\n\n

That's it! Let's recap the various ways Gradio and Hugging Face work together:

\n\n
    \n
  1. You can convert a transformers pipeline into a Gradio demo using from_pipeline()
  2. You can easily build a demo around the Inference API, without having to load the model yourself, using gr.load()
  3. You can host your Gradio demo on Hugging Face Spaces, either using the GUI or entirely in Python.
  4. You can embed Gradio demos that are hosted on Hugging Face Spaces onto your own website.
  5. You can load demos from Hugging Face Spaces to remix and create new Gradio demos using gr.load().
\n\n

\ud83e\udd17

\n", "tags": ["HUB", "SPACES", "EMBED"], "spaces": ["https://huggingface.co/spaces/gradio/helsinki_translation_en_es"], "url": "/guides/using-hugging-face-integrations/", "contributor": "Omar Sanseviero \ud83e\udd99 "}, {"name": "Gradio-and-Comet", "category": "integrating-other-frameworks", "pretty_category": "Integrating Other Frameworks", "guide_index": null, "absolute_index": 17, "pretty_name": "Gradio And Comet", "content": "# Using Gradio and Comet\n\n\n\n\n## Introduction\n\nIn this guide we will demonstrate some of the ways you can use Gradio with Comet. We will cover the basics of using Comet with Gradio and show you some of the ways that you can leverage Gradio's advanced features such as [Embedding with iFrames](https://www.gradio.app/guides/sharing-your-app/#embedding-with-iframes) and [State](https://www.gradio.app/docs/#state) to build some amazing model evaluation workflows.\n\nHere is a list of the topics covered in this guide.\n\n1. Logging Gradio UI's to your Comet Experiments\n2. Embedding Gradio Applications directly into your Comet Projects\n3. Embedding Hugging Face Spaces directly into your Comet Projects\n4. Logging Model Inferences from your Gradio Application to Comet\n\n\n## What is Comet?\n\n[Comet](https://www.comet.com?utm_source=gradio&utm_medium=referral&utm_campaign=gradio-integration&utm_content=gradio-docs) is an MLOps Platform that is designed to help Data Scientists and Teams build better models faster! Comet provides tooling to Track, Explain, Manage, and Monitor your models in a single place! It works with Jupyter Notebooks and Scripts and most importantly it's 100% free!\n\n\n## Setup\n\nFirst, install the dependencies needed to run these examples\n\n```shell\npip install comet_ml torch torchvision transformers gradio shap requests Pillow\n```\n\nNext, you will need to [sign up for a Comet Account](https://www.comet.com/signup?utm_source=gradio&utm_medium=referral&utm_campaign=gradio-integration&utm_content=gradio-docs). Once you have your account set up, [grab your API Key](https://www.comet.com/docs/v2/guides/getting-started/quickstart/#get-an-api-key?utm_source=gradio&utm_medium=referral&utm_campaign=gradio-integration&utm_content=gradio-docs) and configure your Comet credentials\n\nIf you're running these examples as a script, you can either export your credentials as environment variables\n\n```shell\nexport COMET_API_KEY=\"\"\nexport COMET_WORKSPACE=\"\"\nexport COMET_PROJECT_NAME=\"\"\n```\n\nor set them in a `.comet.config` file in your working directory. You file should be formatted in the following way.\n\n```shell\n[comet]\napi_key=\nworkspace=\nproject_name=\n```\n\nIf you are using the provided Colab Notebooks to run these examples, please run the cell with the following snippet before starting the Gradio UI. Running this cell allows you to interactively add your API key to the notebook.\n\n```python\nimport comet_ml\ncomet_ml.init()\n```\n\n## 1. 
Logging Gradio UI's to your Comet Experiments\n\n[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/comet-ml/comet-examples/blob/master/integrations/model-evaluation/gradio/notebooks/Gradio_and_Comet.ipynb)\n\nIn this example, we will go over how to log your Gradio Applications to Comet and interact with them using the Gradio Custom Panel.\n\nLet's start by building a simple Image Classification example using `resnet18`.\n\n```python\nimport comet_ml\n\nimport requests\nimport torch\nfrom PIL import Image\nfrom torchvision import transforms\n\ntorch.hub.download_url_to_file(\"https://github.com/pytorch/hub/raw/master/images/dog.jpg\", \"dog.jpg\")\n\nif torch.cuda.is_available():\n device = \"cuda\"\nelse:\n device = \"cpu\"\n\nmodel = torch.hub.load(\"pytorch/vision:v0.6.0\", \"resnet18\", pretrained=True).eval()\nmodel = model.to(device)\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\n\ndef predict(inp):\n inp = Image.fromarray(inp.astype(\"uint8\"), \"RGB\")\n inp = transforms.ToTensor()(inp).unsqueeze(0)\n with torch.no_grad():\n prediction = torch.nn.functional.softmax(model(inp.to(device))[0], dim=0)\n return {labels[i]: float(prediction[i]) for i in range(1000)}\n\n\ninputs = gr.Image()\noutputs = gr.Label(num_top_classes=3)\n\nio = gr.Interface(\n fn=predict, inputs=inputs, outputs=outputs, examples=[\"dog.jpg\"]\n)\nio.launch(inline=False, share=True)\n\nexperiment = comet_ml.Experiment()\nexperiment.add_tag(\"image-classifier\")\n\nio.integrate(comet_ml=experiment)\n```\n\nThe last line in this snippet will log the URL of the Gradio Application to your Comet Experiment. You can find the URL in the Text Tab of your Experiment.\n\n\n\nAdd the Gradio Panel to your Experiment to interact with your application.\n\n\n\n\n## 2. Embedding Gradio Applications directly into your Comet Projects\n\n\n\nIf you are permanently hosting your Gradio application, you can embed the UI using the Gradio Panel Extended custom Panel.\n\nGo to your Comet Project page, and head over to the Panels tab. Click the `+ Add` button to bring up the Panels search page.\n\n\"adding-panels\"\n\nNext, search for Gradio Panel Extended in the Public Panels section and click `Add`.\n\n\"gradio-panel-extended\"\n\nOnce you have added your Panel, click `Edit` to access to the Panel Options page and paste in the URL of your Gradio application.\n\n![Edit-Gradio-Panel-Options](https://user-images.githubusercontent.com/7529846/214573001-23814b5a-ca65-4ace-a8a5-b27cdda70f7a.gif)\n\n\"Edit-Gradio-Panel-URL\"\n\n\n## 3. Embedding Hugging Face Spaces directly into your Comet Projects\n\n\n\nYou can also embed Gradio Applications that are hosted on Hugging Faces Spaces into your Comet Projects using the Hugging Face Spaces Panel.\n\nGo to your Comet Project page, and head over to the Panels tab. Click the `+ Add` button to bring up the Panels search page. Next, search for the Hugging Face Spaces Panel in the Public Panels section and click `Add`.\n\n\"huggingface-spaces-panel\"\n\nOnce you have added your Panel, click Edit to access to the Panel Options page and paste in the path of your Hugging Face Space e.g. `pytorch/ResNet`\n\n\"Edit-HF-Space\"\n\n## 4. 
Logging Model Inferences to Comet\n\n\n\n[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/comet-ml/comet-examples/blob/master/integrations/model-evaluation/gradio/notebooks/Logging_Model_Inferences_with_Comet_and_Gradio.ipynb)\n\n\nIn the previous examples, we demonstrated the various ways in which you can interact with a Gradio application through the Comet UI. Additionally, you can also log model inferences, such as SHAP plots, from your Gradio application to Comet.\n\nIn the following snippet, we're going to log inferences from a Text Generation model. We can persist an Experiment across multiple inference calls using Gradio's [State](https://www.gradio.app/docs/#state) object. This will allow you to log multiple inferences from a model to a single Experiment.\n\n```python\nimport comet_ml\nimport gradio as gr\nimport shap\nimport torch\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\n\nif torch.cuda.is_available():\n device = \"cuda\"\nelse:\n device = \"cpu\"\n\nMODEL_NAME = \"gpt2\"\n\nmodel = AutoModelForCausalLM.from_pretrained(MODEL_NAME)\n\n# set model decoder to true\nmodel.config.is_decoder = True\n# set text-generation params under task_specific_params\nmodel.config.task_specific_params[\"text-generation\"] = {\n \"do_sample\": True,\n \"max_length\": 50,\n \"temperature\": 0.7,\n \"top_k\": 50,\n \"no_repeat_ngram_size\": 2,\n}\nmodel = model.to(device)\n\ntokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)\nexplainer = shap.Explainer(model, tokenizer)\n\n\ndef start_experiment():\n \"\"\"Returns an APIExperiment object that is thread safe\n and can be used to log inferences to a single Experiment\n \"\"\"\n try:\n api = comet_ml.API()\n workspace = api.get_default_workspace()\n project_name = comet_ml.config.get_config()[\"comet.project_name\"]\n\n experiment = comet_ml.APIExperiment(\n workspace=workspace, project_name=project_name\n )\n experiment.log_other(\"Created from\", \"gradio-inference\")\n\n message = f\"Started Experiment: [{experiment.name}]({experiment.url})\"\n\n return (experiment, message)\n\n except Exception as e:\n return None, None\n\n\ndef predict(text, state, message):\n experiment = state\n\n shap_values = explainer([text])\n plot = shap.plots.text(shap_values, display=False)\n\n if experiment is not None:\n experiment.log_other(\"message\", message)\n experiment.log_html(plot)\n\n return plot\n\n\nwith gr.Blocks() as demo:\n start_experiment_btn = gr.Button(\"Start New Experiment\")\n experiment_status = gr.Markdown()\n\n # Log a message to the Experiment to provide more context\n experiment_message = gr.Textbox(label=\"Experiment Message\")\n experiment = gr.State()\n\n input_text = gr.Textbox(label=\"Input Text\", lines=5, interactive=True)\n submit_btn = gr.Button(\"Submit\")\n\n output = gr.HTML(interactive=True)\n\n start_experiment_btn.click(\n start_experiment, outputs=[experiment, experiment_status]\n )\n submit_btn.click(\n predict, inputs=[input_text, experiment, experiment_message], outputs=[output]\n )\n```\n\nInferences from this snippet will be saved in the HTML tab of your experiment.\n\n\n\n## Conclusion\n\nWe hope you found this guide useful and that it provides some inspiration to help you build awesome model evaluation workflows with Comet and Gradio.\n\n## How to contribute Gradio demos on HF spaces on the Comet organization\n\n* Create an account on Hugging Face [here](https://huggingface.co/join).\n* Add Gradio Demo under your username, see this 
[course](https://huggingface.co/course/chapter9/4?fw=pt) for setting up Gradio Demo on Hugging Face.\n* Request to join the Comet organization [here](https://huggingface.co/Comet).\n\n## Additional Resources\n\n* [Comet Documentation](https://www.comet.com/docs/v2/?utm_source=gradio&utm_medium=referral&utm_campaign=gradio-integration&utm_content=gradio-docs)\n", "html": "

Using Gradio and Comet

\n\n

Introduction

\n\n

In this guide we will demonstrate some of the ways you can use Gradio with Comet. We will cover the basics of using Comet with Gradio and show you some of the ways that you can leverage Gradio's advanced features such as Embedding with iFrames and State to build some amazing model evaluation workflows.

\n\n

Here is a list of the topics covered in this guide.

\n\n
    \n
  1. Logging Gradio UIs to your Comet Experiments
  2. Embedding Gradio Applications directly into your Comet Projects
  3. Embedding Hugging Face Spaces directly into your Comet Projects
  4. Logging Model Inferences from your Gradio Application to Comet
\n\n

What is Comet?

\n\n

Comet is an MLOps Platform that is designed to help Data Scientists and Teams build better models faster! Comet provides tooling to Track, Explain, Manage, and Monitor your models in a single place! It works with Jupyter Notebooks and Scripts and most importantly it's 100% free!

\n\n

Setup

\n\n

First, install the dependencies needed to run these examples

\n\n
pip install comet_ml torch torchvision transformers gradio shap requests Pillow\n
\n\n

Next, you will need to sign up for a Comet Account. Once you have your account set up, grab your API Key and configure your Comet credentials

\n\n

If you're running these examples as a script, you can either export your credentials as environment variables

\n\n
export COMET_API_KEY=\"<Your API Key>\"\nexport COMET_WORKSPACE=\"<Your Workspace Name>\"\nexport COMET_PROJECT_NAME=\"<Your Project Name>\"\n
\n\n

or set them in a .comet.config file in your working directory. Your file should be formatted in the following way.

\n\n
[comet]\napi_key=<Your API Key>\nworkspace=<Your Workspace Name>\nproject_name=<Your Project Name>\n
\n\n

If you are using the provided Colab Notebooks to run these examples, please run the cell with the following snippet before starting the Gradio UI. Running this cell allows you to interactively add your API key to the notebook.

\n\n
import comet_ml\ncomet_ml.init()\n
\n\n

1. Logging Gradio UIs to your Comet Experiments

\n\n

\"Open

\n\n

In this example, we will go over how to log your Gradio Applications to Comet and interact with them using the Gradio Custom Panel.

\n\n

Let's start by building a simple Image Classification example using resnet18.

\n\n
import comet_ml\n\nimport gradio as gr\nimport requests\nimport torch\nfrom PIL import Image\nfrom torchvision import transforms\n\ntorch.hub.download_url_to_file(\"https://github.com/pytorch/hub/raw/master/images/dog.jpg\", \"dog.jpg\")\n\nif torch.cuda.is_available():\n    device = \"cuda\"\nelse:\n    device = \"cpu\"\n\nmodel = torch.hub.load(\"pytorch/vision:v0.6.0\", \"resnet18\", pretrained=True).eval()\nmodel = model.to(device)\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\n\ndef predict(inp):\n    inp = Image.fromarray(inp.astype(\"uint8\"), \"RGB\")\n    inp = transforms.ToTensor()(inp).unsqueeze(0)\n    with torch.no_grad():\n        prediction = torch.nn.functional.softmax(model(inp.to(device))[0], dim=0)\n    return {labels[i]: float(prediction[i]) for i in range(1000)}\n\n\ninputs = gr.Image()\noutputs = gr.Label(num_top_classes=3)\n\nio = gr.Interface(\n    fn=predict, inputs=inputs, outputs=outputs, examples=[\"dog.jpg\"]\n)\nio.launch(inline=False, share=True)\n\nexperiment = comet_ml.Experiment()\nexperiment.add_tag(\"image-classifier\")\n\nio.integrate(comet_ml=experiment)\n
\n\n

The last line in this snippet will log the URL of the Gradio Application to your Comet Experiment. You can find the URL in the Text Tab of your Experiment.

\n\n

\n\n

Add the Gradio Panel to your Experiment to interact with your application.

\n\n

\n\n

2. Embedding Gradio Applications directly into your Comet Projects

\n\n\n\n

If you are permanently hosting your Gradio application, you can embed the UI using the Gradio Panel Extended custom Panel.

\n\n

Go to your Comet Project page, and head over to the Panels tab. Click the + Add button to bring up the Panels search page.

\n\n

\"adding-panels\"

\n\n

Next, search for Gradio Panel Extended in the Public Panels section and click Add.

\n\n

\"gradio-panel-extended\"

\n\n

Once you have added your Panel, click Edit to access the Panel Options page and paste in the URL of your Gradio application.

\n\n

\"Edit-Gradio-Panel-Options\"

\n\n

\"Edit-Gradio-Panel-URL\"

\n\n

3. Embedding Hugging Face Spaces directly into your Comet Projects

\n\n\n\n

You can also embed Gradio Applications that are hosted on Hugging Face Spaces into your Comet Projects using the Hugging Face Spaces Panel.

\n\n

Go to your Comet Project page, and head over to the Panels tab. Click the + Add button to bring up the Panels search page. Next, search for the Hugging Face Spaces Panel in the Public Panels section and click Add.

\n\n

\"huggingface-spaces-panel\"

\n\n

Once you have added your Panel, click Edit to access the Panel Options page and paste in the path of your Hugging Face Space, e.g. pytorch/ResNet.

\n\n

\"Edit-HF-Space\"

\n\n

4. Logging Model Inferences to Comet

\n\n\n\n

\"Open

\n\n

In the previous examples, we demonstrated the various ways in which you can interact with a Gradio application through the Comet UI. Additionally, you can also log model inferences, such as SHAP plots, from your Gradio application to Comet.

\n\n

In the following snippet, we're going to log inferences from a Text Generation model. We can persist an Experiment across multiple inference calls using Gradio's State object. This will allow you to log multiple inferences from a model to a single Experiment.

\n\n
import comet_ml\nimport gradio as gr\nimport shap\nimport torch\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\n\nif torch.cuda.is_available():\n    device = \"cuda\"\nelse:\n    device = \"cpu\"\n\nMODEL_NAME = \"gpt2\"\n\nmodel = AutoModelForCausalLM.from_pretrained(MODEL_NAME)\n\n# set model decoder to true\nmodel.config.is_decoder = True\n# set text-generation params under task_specific_params\nmodel.config.task_specific_params[\"text-generation\"] = {\n    \"do_sample\": True,\n    \"max_length\": 50,\n    \"temperature\": 0.7,\n    \"top_k\": 50,\n    \"no_repeat_ngram_size\": 2,\n}\nmodel = model.to(device)\n\ntokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)\nexplainer = shap.Explainer(model, tokenizer)\n\n\ndef start_experiment():\n    \"\"\"Returns an APIExperiment object that is thread safe\n    and can be used to log inferences to a single Experiment\n    \"\"\"\n    try:\n        api = comet_ml.API()\n        workspace = api.get_default_workspace()\n        project_name = comet_ml.config.get_config()[\"comet.project_name\"]\n\n        experiment = comet_ml.APIExperiment(\n            workspace=workspace, project_name=project_name\n        )\n        experiment.log_other(\"Created from\", \"gradio-inference\")\n\n        message = f\"Started Experiment: [{experiment.name}]({experiment.url})\"\n\n        return (experiment, message)\n\n    except Exception as e:\n        return None, None\n\n\ndef predict(text, state, message):\n    experiment = state\n\n    shap_values = explainer([text])\n    plot = shap.plots.text(shap_values, display=False)\n\n    if experiment is not None:\n        experiment.log_other(\"message\", message)\n        experiment.log_html(plot)\n\n    return plot\n\n\nwith gr.Blocks() as demo:\n    start_experiment_btn = gr.Button(\"Start New Experiment\")\n    experiment_status = gr.Markdown()\n\n    # Log a message to the Experiment to provide more context\n    experiment_message = gr.Textbox(label=\"Experiment Message\")\n    experiment = gr.State()\n\n    input_text = gr.Textbox(label=\"Input Text\", lines=5, interactive=True)\n    submit_btn = gr.Button(\"Submit\")\n\n    output = gr.HTML(interactive=True)\n\n    start_experiment_btn.click(\n        start_experiment, outputs=[experiment, experiment_status]\n    )\n    submit_btn.click(\n        predict, inputs=[input_text, experiment, experiment_message], outputs=[output]\n    )\n
\n\n

Inferences from this snippet will be saved in the HTML tab of your experiment.

\n\n

\n\n

Conclusion

\n\n

We hope you found this guide useful and that it provides some inspiration to help you build awesome model evaluation workflows with Comet and Gradio.

\n\n

How to contribute Gradio demos on HF Spaces to the Comet organization

\n\n
    \n
  • Create an account on Hugging Face here.
  • Add a Gradio demo under your username; see this course for setting up a Gradio demo on Hugging Face.
  • Request to join the Comet organization here.
\n\n

Additional Resources

\n\n\n", "tags": ["COMET", "SPACES"], "spaces": [], "url": "/guides/Gradio-and-Comet/", "contributor": "the Comet team"}, {"name": "Gradio-and-ONNX-on-Hugging-Face", "category": "integrating-other-frameworks", "pretty_category": "Integrating Other Frameworks", "guide_index": null, "absolute_index": 18, "pretty_name": "Gradio And ONNX On Hugging Face", "content": "# Gradio and ONNX on Hugging Face\n\n\n\n\n\n## Introduction\n\nIn this Guide, we'll walk you through:\n\n* Introduction of ONNX, ONNX model zoo, Gradio, and Hugging Face Spaces\n* How to setup a Gradio demo for EfficientNet-Lite4\n* How to contribute your own Gradio demos for the ONNX organization on Hugging Face\n\nHere's an example of an ONNX model: try out the EfficientNet-Lite4 demo below.\n\n\n\n## What is the ONNX Model Zoo?\nOpen Neural Network Exchange ([ONNX](https://onnx.ai/)) is an open standard format for representing machine learning models. ONNX is supported by a community of partners who have implemented it in many frameworks and tools. For example, if you have trained a model in TensorFlow or PyTorch, you can convert it to ONNX easily, and from there run it on a variety of devices using an engine/compiler like ONNX Runtime.\n\nThe [ONNX Model Zoo](https://github.com/onnx/models) is a collection of pre-trained, state-of-the-art models in the ONNX format contributed by community members. Accompanying each model are Jupyter notebooks for model training and running inference with the trained model. The notebooks are written in Python and include links to the training dataset as well as references to the original paper that describes the model architecture.\n\n\n## What are Hugging Face Spaces & Gradio?\n\n### Gradio\n\nGradio lets users demo their machine learning models as a web app all in python code. Gradio wraps a python function into a user interface and the demos can be launched inside jupyter notebooks, colab notebooks, as well as embedded in your own website and hosted on Hugging Face Spaces for free.\n\nGet started [here](https://gradio.app/getting_started)\n\n### Hugging Face Spaces\n\nHugging Face Spaces is a free hosting option for Gradio demos. Spaces comes with 3 SDK options: Gradio, Streamlit and Static HTML demos. Spaces can be public or private and the workflow is similar to github repos. There are over 2000+ spaces currently on Hugging Face. Learn more about spaces [here](https://huggingface.co/spaces/launch).\n\n### Hugging Face Models\n\nHugging Face Model Hub also supports ONNX models and ONNX models can be filtered through the [ONNX tag](https://huggingface.co/models?library=onnx&sort=downloads)\n\n## How did Hugging Face help the ONNX Model Zoo?\nThere are a lot of Jupyter notebooks in the ONNX Model Zoo for users to test models. Previously, users needed to download the models themselves and run those notebooks locally for testing. With Hugging Face, the testing process can be much simpler and more user-friendly. Users can easily try certain ONNX Model Zoo model on Hugging Face Spaces and run a quick demo powered by Gradio with ONNX Runtime, all on cloud without downloading anything locally. Note, there are various runtimes for ONNX, e.g., [ONNX Runtime](https://github.com/microsoft/onnxruntime), [MXNet](https://github.com/apache/incubator-mxnet).\n\n## What is the role of ONNX Runtime?\nONNX Runtime is a cross-platform inference and training machine-learning accelerator. 
It makes live Gradio demos with ONNX Model Zoo model on Hugging Face possible.\n\nONNX Runtime inference can enable faster customer experiences and lower costs, supporting models from deep learning frameworks such as PyTorch and TensorFlow/Keras as well as classical machine learning libraries such as scikit-learn, LightGBM, XGBoost, etc. ONNX Runtime is compatible with different hardware, drivers, and operating systems, and provides optimal performance by leveraging hardware accelerators where applicable alongside graph optimizations and transforms. For more information please see the [official website](https://onnxruntime.ai/).\n\n## Setting up a Gradio Demo for EfficientNet-Lite4\n\nEfficientNet-Lite 4 is the largest variant and most accurate of the set of EfficientNet-Lite models. It is an integer-only quantized model that produces the highest accuracy of all of the EfficientNet models. It achieves 80.4% ImageNet top-1 accuracy, while still running in real-time (e.g. 30ms/image) on a Pixel 4 CPU. To learn more read the [model card](https://github.com/onnx/models/tree/main/vision/classification/efficientnet-lite4)\n\nHere we walk through setting up a example demo for EfficientNet-Lite4 using Gradio\n\nFirst we import our dependencies and download and load the efficientnet-lite4 model from the onnx model zoo. Then load the labels from the labels_map.txt file. We then setup our preprocessing functions, load the model for inference, and setup the inference function. Finally, the inference function is wrapped into a gradio interface for a user to interact with. See the full code below.\n\n\n```python\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nimport cv2\nimport json\nimport gradio as gr\nfrom huggingface_hub import hf_hub_download\nfrom onnx import hub\nimport onnxruntime as ort\n\n# loads ONNX model from ONNX Model Zoo\nmodel = hub.load(\"efficientnet-lite4\")\n# loads the labels text file\nlabels = json.load(open(\"labels_map.txt\", \"r\"))\n\n# sets image file dimensions to 224x224 by resizing and cropping image from center\ndef pre_process_edgetpu(img, dims):\n output_height, output_width, _ = dims\n img = resize_with_aspectratio(img, output_height, output_width, inter_pol=cv2.INTER_LINEAR)\n img = center_crop(img, output_height, output_width)\n img = np.asarray(img, dtype='float32')\n # converts jpg pixel value from [0 - 255] to float array [-1.0 - 1.0]\n img -= [127.0, 127.0, 127.0]\n img /= [128.0, 128.0, 128.0]\n return img\n\n# resizes the image with a proportional scale\ndef resize_with_aspectratio(img, out_height, out_width, scale=87.5, inter_pol=cv2.INTER_LINEAR):\n height, width, _ = img.shape\n new_height = int(100. * out_height / scale)\n new_width = int(100. 
* out_width / scale)\n if height > width:\n w = new_width\n h = int(new_height * height / width)\n else:\n h = new_height\n w = int(new_width * width / height)\n img = cv2.resize(img, (w, h), interpolation=inter_pol)\n return img\n\n# crops the image around the center based on given height and width\ndef center_crop(img, out_height, out_width):\n height, width, _ = img.shape\n left = int((width - out_width) / 2)\n right = int((width + out_width) / 2)\n top = int((height - out_height) / 2)\n bottom = int((height + out_height) / 2)\n img = img[top:bottom, left:right]\n return img\n\n\nsess = ort.InferenceSession(model)\n\ndef inference(img):\n img = cv2.imread(img)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n \n img = pre_process_edgetpu(img, (224, 224, 3))\n \n img_batch = np.expand_dims(img, axis=0)\n\n results = sess.run([\"Softmax:0\"], {\"images:0\": img_batch})[0]\n result = reversed(results[0].argsort()[-5:])\n resultdic = {}\n for r in result:\n resultdic[labels[str(r)]] = float(results[0][r])\n return resultdic\n \ntitle = \"EfficientNet-Lite4\"\ndescription = \"EfficientNet-Lite 4 is the largest variant and most accurate of the set of EfficientNet-Lite model. It is an integer-only quantized model that produces the highest accuracy of all of the EfficientNet models. It achieves 80.4% ImageNet top-1 accuracy, while still running in real-time (e.g. 30ms/image) on a Pixel 4 CPU.\"\nexamples = [['catonnx.jpg']]\ngr.Interface(inference, gr.Image(type=\"filepath\"), \"label\", title=title, description=description, examples=examples).launch()\n```\n\n\n## How to contribute Gradio demos on HF spaces using ONNX models\n\n* Add model to the [onnx model zoo](https://github.com/onnx/models/blob/main/.github/PULL_REQUEST_TEMPLATE.md)\n* Create an account on Hugging Face [here](https://huggingface.co/join).\n* See list of models left to add to ONNX organization, please refer to the table with the [Models list](https://github.com/onnx/models#models)\n* Add Gradio Demo under your username, see this [blog post](https://huggingface.co/blog/gradio-spaces) for setting up Gradio Demo on Hugging Face. \n* Request to join ONNX Organization [here](https://huggingface.co/onnx).\n* Once approved transfer model from your username to ONNX organization\n* Add a badge for model in model table, see examples in [Models list](https://github.com/onnx/models#models)\n", "html": "

Gradio and ONNX on Hugging Face

\n\n

Introduction

\n\n

In this Guide, we'll walk you through:

\n\n
    \n
  • An introduction to ONNX, the ONNX Model Zoo, Gradio, and Hugging Face Spaces
  • How to set up a Gradio demo for EfficientNet-Lite4
  • How to contribute your own Gradio demos for the ONNX organization on Hugging Face
\n\n

Here's an example of an ONNX model: try out the EfficientNet-Lite4 demo below.

\n\n\n\n

What is the ONNX Model Zoo?

\n\n

Open Neural Network Exchange (ONNX) is an open standard format for representing machine learning models. ONNX is supported by a community of partners who have implemented it in many frameworks and tools. For example, if you have trained a model in TensorFlow or PyTorch, you can convert it to ONNX easily, and from there run it on a variety of devices using an engine/compiler like ONNX Runtime.

\n\n
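As a quick illustration of that conversion step, a minimal sketch of exporting a PyTorch model to ONNX (the model and input shape below are placeholders) could look like this:

\n\n
import torch
import torchvision

# Hedged sketch: export a pretrained torchvision model to the ONNX format.
model = torchvision.models.resnet18(pretrained=True).eval()
dummy_input = torch.randn(1, 3, 224, 224)  # example input with the expected shape
torch.onnx.export(model, dummy_input, "resnet18.onnx")
\n\n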

The ONNX Model Zoo is a collection of pre-trained, state-of-the-art models in the ONNX format contributed by community members. Accompanying each model are Jupyter notebooks for model training and running inference with the trained model. The notebooks are written in Python and include links to the training dataset as well as references to the original paper that describes the model architecture.

\n\n

What are Hugging Face Spaces & Gradio?

\n\n

Gradio

\n\n

Gradio lets users demo their machine learning models as a web app, all in Python code. Gradio wraps a Python function into a user interface, and the demos can be launched inside Jupyter notebooks or Colab notebooks, as well as embedded in your own website and hosted on Hugging Face Spaces for free.

\n\n

Get started here

\n\n

Hugging Face Spaces

\n\n

Hugging Face Spaces is a free hosting option for Gradio demos. Spaces comes with 3 SDK options: Gradio, Streamlit, and static HTML demos. Spaces can be public or private, and the workflow is similar to GitHub repos. There are over 2,000 Spaces currently on Hugging Face. Learn more about Spaces here.

\n\n

Hugging Face Models

\n\n

The Hugging Face Model Hub also supports ONNX models, which can be filtered through the ONNX tag.

\n\n
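You can also query the tag programmatically with the huggingface_hub client. Below is a rough sketch; the filter, sort, and limit arguments are assumptions about the client API rather than something prescribed by this guide:

\n\n
from huggingface_hub import HfApi

# Hedged sketch: list Hub models tagged with the ONNX library, most downloaded first.
api = HfApi()
for model in api.list_models(filter="onnx", sort="downloads", limit=5):
    print(model.modelId)
\n\n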

How did Hugging Face help the ONNX Model Zoo?

\n\n

There are a lot of Jupyter notebooks in the ONNX Model Zoo for users to test models. Previously, users needed to download the models themselves and run those notebooks locally for testing. With Hugging Face, the testing process can be much simpler and more user-friendly. Users can easily try an ONNX Model Zoo model on Hugging Face Spaces and run a quick demo powered by Gradio with ONNX Runtime, all in the cloud without downloading anything locally. Note that there are various runtimes for ONNX, e.g., ONNX Runtime and MXNet.

\n\n

What is the role of ONNX Runtime?

\n\n

ONNX Runtime is a cross-platform inference and training machine-learning accelerator. It makes live Gradio demos with ONNX Model Zoo models on Hugging Face possible.

\n\n

ONNX Runtime inference can enable faster customer experiences and lower costs, supporting models from deep learning frameworks such as PyTorch and TensorFlow/Keras as well as classical machine learning libraries such as scikit-learn, LightGBM, XGBoost, etc. ONNX Runtime is compatible with different hardware, drivers, and operating systems, and provides optimal performance by leveraging hardware accelerators where applicable alongside graph optimizations and transforms. For more information please see the official website.

\n\n
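As a tiny, hedged illustration of what inference with ONNX Runtime looks like (the model file and input shape below are placeholders; the full EfficientNet-Lite4 demo in the next section is the complete example):

\n\n
import numpy as np
import onnxruntime as ort

# Hedged sketch: load an ONNX model and run a single forward pass.
sess = ort.InferenceSession("model.onnx")  # placeholder model file
input_name = sess.get_inputs()[0].name
dummy = np.zeros((1, 3, 224, 224), dtype=np.float32)  # placeholder input shape
outputs = sess.run(None, {input_name: dummy})
print([o.shape for o in outputs])
\n\n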

Setting up a Gradio Demo for EfficientNet-Lite4

\n\n

EfficientNet-Lite 4 is the largest variant and most accurate of the set of EfficientNet-Lite models. It is an integer-only quantized model that produces the highest accuracy of all of the EfficientNet models. It achieves 80.4% ImageNet top-1 accuracy, while still running in real-time (e.g. 30ms/image) on a Pixel 4 CPU. To learn more, read the model card.

\n\n

Here we walk through setting up an example demo for EfficientNet-Lite4 using Gradio.

\n\n

First, we import our dependencies and download and load the efficientnet-lite4 model from the ONNX Model Zoo. Then we load the labels from the labels_map.txt file. We then set up our preprocessing functions, load the model for inference, and set up the inference function. Finally, the inference function is wrapped into a Gradio interface for a user to interact with. See the full code below.

\n\n
import numpy as np\nimport math\nimport matplotlib.pyplot as plt\nimport cv2\nimport json\nimport gradio as gr\nfrom huggingface_hub import hf_hub_download\nfrom onnx import hub\nimport onnxruntime as ort\n\n# loads ONNX model from ONNX Model Zoo\nmodel = hub.load(\"efficientnet-lite4\")\n# loads the labels text file\nlabels = json.load(open(\"labels_map.txt\", \"r\"))\n\n# sets image file dimensions to 224x224 by resizing and cropping image from center\ndef pre_process_edgetpu(img, dims):\n    output_height, output_width, _ = dims\n    img = resize_with_aspectratio(img, output_height, output_width, inter_pol=cv2.INTER_LINEAR)\n    img = center_crop(img, output_height, output_width)\n    img = np.asarray(img, dtype='float32')\n    # converts jpg pixel value from [0 - 255] to float array [-1.0 - 1.0]\n    img -= [127.0, 127.0, 127.0]\n    img /= [128.0, 128.0, 128.0]\n    return img\n\n# resizes the image with a proportional scale\ndef resize_with_aspectratio(img, out_height, out_width, scale=87.5, inter_pol=cv2.INTER_LINEAR):\n    height, width, _ = img.shape\n    new_height = int(100. * out_height / scale)\n    new_width = int(100. * out_width / scale)\n    if height > width:\n        w = new_width\n        h = int(new_height * height / width)\n    else:\n        h = new_height\n        w = int(new_width * width / height)\n    img = cv2.resize(img, (w, h), interpolation=inter_pol)\n    return img\n\n# crops the image around the center based on given height and width\ndef center_crop(img, out_height, out_width):\n    height, width, _ = img.shape\n    left = int((width - out_width) / 2)\n    right = int((width + out_width) / 2)\n    top = int((height - out_height) / 2)\n    bottom = int((height + out_height) / 2)\n    img = img[top:bottom, left:right]\n    return img\n\n\nsess = ort.InferenceSession(model)\n\ndef inference(img):\n  img = cv2.imread(img)\n  img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n  img = pre_process_edgetpu(img, (224, 224, 3))\n\n  img_batch = np.expand_dims(img, axis=0)\n\n  results = sess.run([\"Softmax:0\"], {\"images:0\": img_batch})[0]\n  result = reversed(results[0].argsort()[-5:])\n  resultdic = {}\n  for r in result:\n      resultdic[labels[str(r)]] = float(results[0][r])\n  return resultdic\n\ntitle = \"EfficientNet-Lite4\"\ndescription = \"EfficientNet-Lite 4 is the largest variant and most accurate of the set of EfficientNet-Lite model. It is an integer-only quantized model that produces the highest accuracy of all of the EfficientNet models. It achieves 80.4% ImageNet top-1 accuracy, while still running in real-time (e.g. 30ms/image) on a Pixel 4 CPU.\"\nexamples = [['catonnx.jpg']]\ngr.Interface(inference, gr.Image(type=\"filepath\"), \"label\", title=title, description=description, examples=examples).launch()\n
\n\n

How to contribute Gradio demos on HF spaces using ONNX models

\n\n
    \n
  • Add your model to the ONNX Model Zoo.
  • Create an account on Hugging Face here.
  • To see the list of models left to add to the ONNX organization, refer to the table in the Models list.
  • Add a Gradio demo under your username; see this blog post for setting up a Gradio demo on Hugging Face.
  • Request to join the ONNX organization here.
  • Once approved, transfer the model from your username to the ONNX organization.
  • Add a badge for the model in the models table; see examples in the Models list.
\n", "tags": ["ONNX", "SPACES"], "spaces": ["https://huggingface.co/spaces/onnx/EfficientNet-Lite4"], "url": "/guides/Gradio-and-ONNX-on-Hugging-Face/", "contributor": "Gradio and the ONNX team"}, {"name": "Gradio-and-Wandb-Integration", "category": "integrating-other-frameworks", "pretty_category": "Integrating Other Frameworks", "guide_index": null, "absolute_index": 19, "pretty_name": "Gradio And Wandb Integration", "content": "# Gradio and W&B Integration\n\n\n\n\n\n## Introduction\n\nIn this Guide, we'll walk you through:\n\n* Introduction of Gradio, and Hugging Face Spaces, and Wandb\n* How to setup a Gradio demo using the Wandb integration for JoJoGAN\n* How to contribute your own Gradio demos after tracking your experiments on wandb to the Wandb organization on Hugging Face\n\nHere's an example of an model trained and experiments tracked on wandb, try out the JoJoGAN demo below.\n\n\n\n## What is Wandb?\n\nWeights and Biases (W&B) allows data scientists and machine learning scientists to track their machine learning experiments at every stage, from training to production. Any metric can be aggregated over samples and shown in panels in a customizable and searchable dashboard, like below:\n\n\"Screen\n\n\n## What are Hugging Face Spaces & Gradio?\n\n### Gradio\n\nGradio lets users demo their machine learning models as a web app, all in a few lines of Python. Gradio wraps any Python function (such as a machine learning model's inference function) into a user interface and the demos can be launched inside jupyter notebooks, colab notebooks, as well as embedded in your own website and hosted on Hugging Face Spaces for free.\n\nGet started [here](https://gradio.app/getting_started)\n\n### Hugging Face Spaces\n\nHugging Face Spaces is a free hosting option for Gradio demos. Spaces comes with 3 SDK options: Gradio, Streamlit and Static HTML demos. Spaces can be public or private and the workflow is similar to github repos. There are over 2000+ spaces currently on Hugging Face. Learn more about spaces [here](https://huggingface.co/spaces/launch).\n\n\n## Setting up a Gradio Demo for JoJoGAN\n\nNow, let's walk you through how to do this on your own. We'll make the assumption that you're new to W&B and Gradio for the purposes of this tutorial. \n\nLet's get started!\n\n1. Create a W&B account\n\n Follow [these quick instructions](https://app.wandb.ai/login) to create your free account if you don\u2019t have one already. It shouldn't take more than a couple minutes. Once you're done (or if you've already got an account), next, we'll run a quick colab. \n\n2. Open Colab Install Gradio and W&B\n\n We'll be following along with the colab provided in the JoJoGAN repo with some minor modifications to use Wandb and Gradio more effectively. \n\n [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/mchong6/JoJoGAN/blob/main/stylize.ipynb)\n\n Install Gradio and Wandb at the top:\n\n```sh\n\npip install gradio wandb\n```\n\n3. Finetune StyleGAN and W&B experiment tracking\n\n This next step will open a W&B dashboard to track your experiments and a gradio panel showing pretrained models to choose from a drop down menu from a Gradio Demo hosted on Huggingface Spaces. 
Here's the code you need for that:\n\n ```python\n \n alpha = 1.0 \n alpha = 1-alpha\n\n preserve_color = True \n num_iter = 100 \n log_interval = 50 \n\n\n samples = []\n column_names = [\"Reference (y)\", \"Style Code(w)\", \"Real Face Image(x)\"]\n\n wandb.init(project=\"JoJoGAN\")\n config = wandb.config\n config.num_iter = num_iter\n config.preserve_color = preserve_color\n wandb.log(\n {\"Style reference\": [wandb.Image(transforms.ToPILImage()(target_im))]},\n step=0)\n\n # load discriminator for perceptual loss\n discriminator = Discriminator(1024, 2).eval().to(device)\n ckpt = torch.load('models/stylegan2-ffhq-config-f.pt', map_location=lambda storage, loc: storage)\n discriminator.load_state_dict(ckpt[\"d\"], strict=False)\n\n # reset generator\n del generator\n generator = deepcopy(original_generator)\n\n g_optim = optim.Adam(generator.parameters(), lr=2e-3, betas=(0, 0.99))\n\n # Which layers to swap for generating a family of plausible real images -> fake image\n if preserve_color:\n id_swap = [9,11,15,16,17]\n else:\n id_swap = list(range(7, generator.n_latent))\n\n for idx in tqdm(range(num_iter)):\n mean_w = generator.get_latent(torch.randn([latents.size(0), latent_dim]).to(device)).unsqueeze(1).repeat(1, generator.n_latent, 1)\n in_latent = latents.clone()\n in_latent[:, id_swap] = alpha*latents[:, id_swap] + (1-alpha)*mean_w[:, id_swap]\n\n img = generator(in_latent, input_is_latent=True)\n\n with torch.no_grad():\n real_feat = discriminator(targets)\n fake_feat = discriminator(img)\n\n loss = sum([F.l1_loss(a, b) for a, b in zip(fake_feat, real_feat)])/len(fake_feat)\n \n\n wandb.log({\"loss\": loss}, step=idx)\n if idx % log_interval == 0:\n generator.eval()\n my_sample = generator(my_w, input_is_latent=True)\n generator.train()\n my_sample = transforms.ToPILImage()(utils.make_grid(my_sample, normalize=True, range=(-1, 1)))\n wandb.log(\n {\"Current stylization\": [wandb.Image(my_sample)]},\n step=idx)\n table_data = [\n wandb.Image(transforms.ToPILImage()(target_im)),\n wandb.Image(img),\n wandb.Image(my_sample),\n ]\n samples.append(table_data)\n\n g_optim.zero_grad()\n loss.backward()\n g_optim.step()\n\n out_table = wandb.Table(data=samples, columns=column_names)\n wandb.log({\"Current Samples\": out_table})\n ```\n\nalpha = 1.0 \nalpha = 1-alpha\n\npreserve_color = True \nnum_iter = 100 \nlog_interval = 50 \n\n\nsamples = []\ncolumn_names = [\"Referece (y)\", \"Style Code(w)\", \"Real Face Image(x)\"]\n\nwandb.init(project=\"JoJoGAN\")\nconfig = wandb.config\nconfig.num_iter = num_iter\nconfig.preserve_color = preserve_color\nwandb.log(\n{\"Style reference\": [wandb.Image(transforms.ToPILImage()(target_im))]},\nstep=0)\n\n# load discriminator for perceptual loss\ndiscriminator = Discriminator(1024, 2).eval().to(device)\nckpt = torch.load('models/stylegan2-ffhq-config-f.pt', map_location=lambda storage, loc: storage)\ndiscriminator.load_state_dict(ckpt[\"d\"], strict=False)\n\n# reset generator\ndel generator\ngenerator = deepcopy(original_generator)\n\ng_optim = optim.Adam(generator.parameters(), lr=2e-3, betas=(0, 0.99))\n\n# Which layers to swap for generating a family of plausible real images -> fake image\nif preserve_color:\n id_swap = [9,11,15,16,17]\nelse:\n id_swap = list(range(7, generator.n_latent))\n\nfor idx in tqdm(range(num_iter)):\n mean_w = generator.get_latent(torch.randn([latents.size(0), latent_dim]).to(device)).unsqueeze(1).repeat(1, generator.n_latent, 1)\n in_latent = latents.clone()\n in_latent[:, id_swap] = alpha*latents[:, id_swap] + 
(1-alpha)*mean_w[:, id_swap]\n\n img = generator(in_latent, input_is_latent=True)\n\n with torch.no_grad():\n real_feat = discriminator(targets)\n fake_feat = discriminator(img)\n\n loss = sum([F.l1_loss(a, b) for a, b in zip(fake_feat, real_feat)])/len(fake_feat)\n \n\n wandb.log({\"loss\": loss}, step=idx)\n if idx % log_interval == 0:\n generator.eval()\n my_sample = generator(my_w, input_is_latent=True)\n generator.train()\n my_sample = transforms.ToPILImage()(utils.make_grid(my_sample, normalize=True, range=(-1, 1)))\n wandb.log(\n {\"Current stylization\": [wandb.Image(my_sample)]},\n step=idx)\n table_data = [\n wandb.Image(transforms.ToPILImage()(target_im)),\n wandb.Image(img),\n wandb.Image(my_sample),\n ]\n samples.append(table_data)\n\n g_optim.zero_grad()\n loss.backward()\n g_optim.step()\n\nout_table = wandb.Table(data=samples, columns=column_names)\nwandb.log({\"Current Samples\": out_table})\n```\n\n4. Save, Download, and Load Model\n\n Here's how to save and download your model.\n\n```python\n\nfrom PIL import Image\nimport torch\ntorch.backends.cudnn.benchmark = True\nfrom torchvision import transforms, utils\nfrom util import *\nimport math\nimport random\nimport numpy as np\nfrom torch import nn, autograd, optim\nfrom torch.nn import functional as F\nfrom tqdm import tqdm\nimport lpips\nfrom model import *\nfrom e4e_projection import projection as e4e_projection\n\nfrom copy import deepcopy\nimport imageio\n\nimport os\nimport sys\nimport torchvision.transforms as transforms\nfrom argparse import Namespace\nfrom e4e.models.psp import pSp\nfrom util import *\nfrom huggingface_hub import hf_hub_download\nfrom google.colab import files\n\ntorch.save({\"g\": generator.state_dict()}, \"your-model-name.pt\")\n\nfiles.download('your-model-name.pt') \n\nlatent_dim = 512\ndevice=\"cuda\"\nmodel_path_s = hf_hub_download(repo_id=\"akhaliq/jojogan-stylegan2-ffhq-config-f\", filename=\"stylegan2-ffhq-config-f.pt\")\noriginal_generator = Generator(1024, latent_dim, 8, 2).to(device)\nckpt = torch.load(model_path_s, map_location=lambda storage, loc: storage)\noriginal_generator.load_state_dict(ckpt[\"g_ema\"], strict=False)\nmean_latent = original_generator.mean_latent(10000)\n\ngenerator = deepcopy(original_generator)\n\nckpt = torch.load(\"/content/JoJoGAN/your-model-name.pt\", map_location=lambda storage, loc: storage)\ngenerator.load_state_dict(ckpt[\"g\"], strict=False)\ngenerator.eval()\n\nplt.rcParams['figure.dpi'] = 150\n\n\n\ntransform = transforms.Compose(\n [\n transforms.Resize((1024, 1024)),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ]\n)\n\n\ndef inference(img): \n img.save('out.jpg') \n aligned_face = align_face('out.jpg')\n\n my_w = e4e_projection(aligned_face, \"out.pt\", device).unsqueeze(0) \n with torch.no_grad():\n my_sample = generator(my_w, input_is_latent=True)\n \n \n npimage = my_sample[0].cpu().permute(1, 2, 0).detach().numpy()\n imageio.imwrite('filename.jpeg', npimage)\n return 'filename.jpeg'\n```\n\n5. Build a Gradio Demo\n\n```python\n\nimport gradio as gr\n\ntitle = \"JoJoGAN\"\ndescription = \"Gradio Demo for JoJoGAN: One Shot Face Stylization. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below.\"\n\ndemo = gr.Interface(\n inference, \n gr.Image(type=\"pil\"), \n gr.Image(type=\"file\"),\n title=title,\n description=description\n)\n\ndemo.launch(share=True)\n```\n\n6. 
Integrate Gradio into your W&B Dashboard\n\n The last step\u2014integrating your Gradio demo with your W&B dashboard\u2014is just one extra line:\n\n```python\n\ndemo.integrate(wandb=wandb)\n```\n\n Once you call integrate, a demo will be created and you can integrate it into your dashboard or report\n\n Outside of W&B with Web components, using the gradio-app tags allows anyone can embed Gradio demos on HF spaces directly into their blogs, websites, documentation, etc.:\n\n```html\n\n \n```\n\n\n7. (Optional) Embed W&B plots in your Gradio App\n\n It's also possible to embed W&B plots within Gradio apps. To do so, you can create a W&B Report of your plots and \n embed them within your Gradio app within a `gr.HTML` block. \n\n The Report will need to be public and you will need to wrap the URL within an iFrame like this: \n```python\n\nimport gradio as gr\n\ndef wandb_report(url):\n iframe = f'\n\n

What is Wandb?

\n\n

Weights and Biases (W&B) allows data scientists and machine learning scientists to track their machine learning experiments at every stage, from training to production. Any metric can be aggregated over samples and shown in panels in a customizable and searchable dashboard, like below:

\n\n

\"Screen

\n\n
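For readers new to W&B, a minimal sketch of that logging workflow looks roughly like this (the project name, config, and metric below are placeholders, not part of the JoJoGAN example later in this guide):

```python
import wandb

# Minimal sketch of W&B experiment tracking; project name, config, and metric are placeholders.
wandb.init(project="my-demo-project", config={"lr": 2e-3, "num_iter": 100})

for step in range(100):
    loss = 1.0 / (step + 1)  # stand-in for a real training loss
    wandb.log({"loss": loss}, step=step)  # each logged key appears as a panel in the dashboard

wandb.finish()
```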

What are Hugging Face Spaces & Gradio?

\n\n

Gradio

\n\n

Gradio lets users demo their machine learning models as a web app, all in a few lines of Python. Gradio wraps any Python function (such as a machine learning model's inference function) into a user interface; the demos can be launched inside Jupyter or Colab notebooks, embedded in your own website, or hosted for free on Hugging Face Spaces. A minimal sketch is shown below.

\n\n
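Assuming only that gradio is installed, wrapping a plain Python function (the greet function below is just a placeholder, not part of the JoJoGAN demo built later in this guide) looks like this:

```python
import gradio as gr

# Placeholder function standing in for a model's inference function.
def greet(name):
    return f"Hello, {name}!"

# Interface wraps the function and builds input/output widgets around it.
gr.Interface(fn=greet, inputs="text", outputs="text").launch()
```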

Get started here

\n\n

Hugging Face Spaces

\n\n

Hugging Face Spaces is a free hosting option for Gradio demos. Spaces comes with 3 SDK options: Gradio, Streamlit, and Static HTML demos. Spaces can be public or private, and the workflow is similar to GitHub repos. There are over 2,000 Spaces currently on Hugging Face. Learn more about Spaces here.

\n\n

Setting up a Gradio Demo for JoJoGAN

\n\n

Now, let's walk you through how to do this on your own. We'll make the assumption that you're new to W&B and Gradio for the purposes of this tutorial.

\n\n

Let's get started!

\n\n
    \n
  1. Create a W&B account

    \n\n

    Follow these quick instructions to create your free account if you don\u2019t have one already. It shouldn't take more than a couple minutes. Once you're done (or if you've already got an account), next, we'll run a quick colab.

  2. \n
  2. Open Colab, Install Gradio and W&B

    \n\n

    We'll be following along with the colab provided in the JoJoGAN repo with some minor modifications to use Wandb and Gradio more effectively.

    \n\n

    \"Open

    \n\n

    Install Gradio and Wandb at the top:

  4. \n
\n\n
\npip install gradio wandb\n
\n\n
    \n
  3. Finetune StyleGAN and W&B experiment tracking

    \n\n

    This next step will open a W&B dashboard to track your experiments, and a Gradio panel showing pretrained models to choose from a drop-down menu, from a Gradio demo hosted on Hugging Face Spaces. Here's the code you need for that:

    \n\n
        \nalpha =  1.0 \nalpha = 1-alpha\n\npreserve_color = True \nnum_iter = 100 \nlog_interval = 50 \n\n\nsamples = []\ncolumn_names = [\"Reference (y)\", \"Style Code(w)\", \"Real Face Image(x)\"]\n\nwandb.init(project=\"JoJoGAN\")\nconfig = wandb.config\nconfig.num_iter = num_iter\nconfig.preserve_color = preserve_color\nwandb.log(\n{\"Style reference\": [wandb.Image(transforms.ToPILImage()(target_im))]},\nstep=0)\n\n

    # load discriminator for perceptual loss

    \n\ndiscriminator = Discriminator(1024, 2).eval().to(device)\nckpt = torch.load('models/stylegan2-ffhq-config-f.pt', map_location=lambda storage, loc: storage)\ndiscriminator.load_state_dict(ckpt[\"d\"], strict=False)\n\n

    # reset generator

    \n\ndel generator\ngenerator = deepcopy(original_generator)\n\ng_optim = optim.Adam(generator.parameters(), lr=2e-3, betas=(0, 0.99))\n\n

    # Which layers to swap for generating a family of plausible real images -> fake image

    \n\nif preserve_color:\n id_swap = [9,11,15,16,17]\nelse:\n id_swap = list(range(7, generator.n_latent))\n\nfor idx in tqdm(range(num_iter)):\n mean_w = generator.get_latent(torch.randn([latents.size(0), latent_dim]).to(device)).unsqueeze(1).repeat(1, generator.n_latent, 1)\n in_latent = latents.clone()\n in_latent[:, id_swap] = alpha*latents[:, id_swap] + (1-alpha)*mean_w[:, id_swap]\n\n img = generator(in_latent, input_is_latent=True)\n\n with torch.no_grad():\n real_feat = discriminator(targets)\n fake_feat = discriminator(img)\n\n loss = sum([F.l1_loss(a, b) for a, b in zip(fake_feat, real_feat)])/len(fake_feat)\n\n\n wandb.log({\"loss\": loss}, step=idx)\n if idx % log_interval == 0:\n generator.eval()\n my_sample = generator(my_w, input_is_latent=True)\n generator.train()\n my_sample = transforms.ToPILImage()(utils.make_grid(my_sample, normalize=True, range=(-1, 1)))\n wandb.log(\n {\"Current stylization\": [wandb.Image(my_sample)]},\n step=idx)\n table_data = [\n wandb.Image(transforms.ToPILImage()(target_im)),\n wandb.Image(img),\n wandb.Image(my_sample),\n ]\n samples.append(table_data)\n\n g_optim.zero_grad()\n loss.backward()\n g_optim.step()\n\nout_table = wandb.Table(data=samples, columns=column_names)\nwandb.log({\"Current Samples\": out_table})\n
  2. \n
\n\n


\n\n
    \n
  4. Save, Download, and Load Model

    \n\n

    Here's how to save and download your model.

  2. \n
\n\n
\nfrom PIL import Image\nimport torch\ntorch.backends.cudnn.benchmark = True\nfrom torchvision import transforms, utils\nfrom util import *\nimport math\nimport random\nimport numpy as np\nfrom torch import nn, autograd, optim\nfrom torch.nn import functional as F\nfrom tqdm import tqdm\nimport lpips\nfrom model import *\nfrom e4e_projection import projection as e4e_projection\n\nfrom copy import deepcopy\nimport imageio\n\nimport os\nimport sys\nimport torchvision.transforms as transforms\nfrom argparse import Namespace\nfrom e4e.models.psp import pSp\nfrom util import *\nfrom huggingface_hub import hf_hub_download\nfrom google.colab import files\n\ntorch.save({\"g\": generator.state_dict()}, \"your-model-name.pt\")\n\nfiles.download('your-model-name.pt') \n\nlatent_dim = 512\ndevice=\"cuda\"\nmodel_path_s = hf_hub_download(repo_id=\"akhaliq/jojogan-stylegan2-ffhq-config-f\", filename=\"stylegan2-ffhq-config-f.pt\")\noriginal_generator = Generator(1024, latent_dim, 8, 2).to(device)\nckpt = torch.load(model_path_s, map_location=lambda storage, loc: storage)\noriginal_generator.load_state_dict(ckpt[\"g_ema\"], strict=False)\nmean_latent = original_generator.mean_latent(10000)\n\ngenerator = deepcopy(original_generator)\n\nckpt = torch.load(\"/content/JoJoGAN/your-model-name.pt\", map_location=lambda storage, loc: storage)\ngenerator.load_state_dict(ckpt[\"g\"], strict=False)\ngenerator.eval()\n\nplt.rcParams['figure.dpi'] = 150\n\n\n\ntransform = transforms.Compose(\n    [\n        transforms.Resize((1024, 1024)),\n        transforms.ToTensor(),\n        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n    ]\n)\n\n\ndef inference(img):  \n    img.save('out.jpg')  \n    aligned_face = align_face('out.jpg')\n\n    my_w = e4e_projection(aligned_face, \"out.pt\", device).unsqueeze(0)  \n    with torch.no_grad():\n        my_sample = generator(my_w, input_is_latent=True)\n\n\n    npimage = my_sample[0].cpu().permute(1, 2, 0).detach().numpy()\n    imageio.imwrite('filename.jpeg', npimage)\n    return 'filename.jpeg'\n
\n\n
    \n
  5. Build a Gradio Demo
  2. \n
\n\n
\nimport gradio as gr\n\ntitle = \"JoJoGAN\"\ndescription = \"Gradio Demo for JoJoGAN: One Shot Face Stylization. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below.\"\n\ndemo = gr.Interface(\n    inference, \n    gr.Image(type=\"pil\"), \n    gr.Image(type=\"file\"),\n    title=title,\n    description=description\n)\n\ndemo.launch(share=True)\n
\n\n
    \n
  6. Integrate Gradio into your W&B Dashboard

    \n\n

    The last step\u2014integrating your Gradio demo with your W&B dashboard\u2014is just one extra line:

  2. \n
\n\n
\ndemo.integrate(wandb=wandb)\n
\n\n
Once you call integrate, a demo will be created and you can embed it into your dashboard or report.\n\nOutside of W&B, Web components using the gradio-app tag allow anyone to embed Gradio demos hosted on HF Spaces directly into their blogs, websites, documentation, etc.:\n
\n\n
\n \n
\n\n
    \n
  7. (Optional) Embed W&B plots in your Gradio App

    \n\n

    It's also possible to embed W&B plots within Gradio apps. To do so, you can create a W&B Report of your plots and \nembed them in your Gradio app inside a gr.HTML block.

    \n\n

    The Report will need to be public and you will need to wrap the URL within an iFrame like this:

  2. \n
\n\n
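A minimal sketch of that pattern, assuming you already have the URL of a public W&B Report (the URL below is a placeholder):

```python
import gradio as gr

def wandb_report(url):
    # Wrap the public report URL in an iframe so it renders inside the Gradio app.
    iframe = f'<iframe src="{url}" style="border:none;height:1024px;width:100%">'
    return gr.HTML(iframe)

with gr.Blocks() as demo:
    # Placeholder URL; replace with your own public W&B Report.
    report_url = "https://wandb.ai/your-entity/your-project/reports/your-report"
    wandb_report(report_url)

demo.launch()
```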
\nimport gradio as gr\n\ndef wandb_report(url):\n    iframe = f'\n\n\nLet's get started!\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). We will be using a pretrained image classification model, so you should also have `torch` installed.\n\n## Step 1 \u2014 Setting up the Image Classification Model\n\nFirst, we will need an image classification model. For this tutorial, we will use a pretrained Resnet-18 model, as it is easily downloadable from [PyTorch Hub](https://pytorch.org/hub/pytorch_vision_resnet/). You can use a different pretrained model or train your own. \n\n```python\nimport torch\n\nmodel = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()\n```\n\nBecause we will be using the model for inference, we have called the `.eval()` method.\n\n## Step 2 \u2014 Defining a `predict` function\n\nNext, we will need to define a function that takes in the *user input*, which in this case is an image, and returns the prediction. The prediction should be returned as a dictionary whose keys are class name and values are confidence probabilities. We will load the class names from this [text file](https://git.io/JJkYN).\n\nIn the case of our pretrained model, it will look like this:\n\n```python\nimport requests\nfrom PIL import Image\nfrom torchvision import transforms\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\ndef predict(inp):\n  inp = transforms.ToTensor()(inp).unsqueeze(0)\n  with torch.no_grad():\n    prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)\n    confidences = {labels[i]: float(prediction[i]) for i in range(1000)}    \n  return confidences\n```\n\nLet's break this down. The function takes one parameter:\n\n* `inp`: the input image as a `PIL` image\n\nThen, the function converts the image to a PIL Image and then eventually a PyTorch `tensor`, passes it through the model, and returns:\n\n* `confidences`: the predictions, as a dictionary whose keys are class labels and whose values are confidence probabilities\n\n## Step 3 \u2014 Creating a Gradio Interface\n\nNow that we have our predictive function set up, we can create a Gradio Interface around it. \n\nIn this case, the input component is a drag-and-drop image component. To create this input, we use `Image(type=\"pil\")` which creates the component and handles the preprocessing to convert that to a `PIL` image. \n\nThe output component will be a `Label`, which displays the top labels in a nice form. Since we don't want to show all 1,000 class labels, we will customize it to show only the top 3 images by constructing it as `Label(num_top_classes=3)`.\n\nFinally, we'll add one more parameter, the `examples`, which allows us to prepopulate our interfaces with a few predefined examples. The code for Gradio looks like this:\n\n```python\nimport gradio as gr\n\ngr.Interface(fn=predict, \n             inputs=gr.Image(type=\"pil\"),\n             outputs=gr.Label(num_top_classes=3),\n             examples=[\"lion.jpg\", \"cheetah.jpg\"]).launch()\n```\n\nThis produces the following interface, which you can try right here in your browser (try uploading your own examples!):\n\n\n\n----------\n\nAnd you're done! That's all the code you need to build a web demo for an image classifier. If you'd like to share with others, try setting `share=True` when you `launch()` the Interface!\n\n", "html": "

Image Classification in PyTorch

\n\n

Introduction

\n\n

Image classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from autonomous vehicles to medical imaging.

\n\n

Such models are perfect to use with Gradio's image input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in Python, and it will look like this (try one of the examples!):

\n\n\n\n

Let's get started!

\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed. We will be using a pretrained image classification model, so you should also have torch installed.

\n\n

Step 1 \u2014 Setting up the Image Classification Model

\n\n

First, we will need an image classification model. For this tutorial, we will use a pretrained Resnet-18 model, as it is easily downloadable from PyTorch Hub. You can use a different pretrained model or train your own.

\n\n
import torch\n\nmodel = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()\n
\n\n

Because we will be using the model for inference, we have called the .eval() method.

\n\n

Step 2 \u2014 Defining a predict function

\n\n

Next, we will need to define a function that takes in the user input, which in this case is an image, and returns the prediction. The prediction should be returned as a dictionary whose keys are class names and whose values are confidence probabilities. We will load the class names from this text file.

\n\n

In the case of our pretrained model, it will look like this:

\n\n
import requests\nfrom PIL import Image\nfrom torchvision import transforms\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\ndef predict(inp):\n  inp = transforms.ToTensor()(inp).unsqueeze(0)\n  with torch.no_grad():\n    prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)\n    confidences = {labels[i]: float(prediction[i]) for i in range(1000)}    \n  return confidences\n
\n\n

Let's break this down. The function takes one parameter:

\n\n
    \n
  • inp: the input image as a PIL image
  • \n
\n\n

Then, the function converts the PIL image to a PyTorch tensor, passes it through the model, and returns:

\n\n
    \n
  • confidences: the predictions, as a dictionary whose keys are class labels and whose values are confidence probabilities
  • \n
\n\n

Step 3 \u2014 Creating a Gradio Interface

\n\n

Now that we have our predictive function set up, we can create a Gradio Interface around it.

\n\n

In this case, the input component is a drag-and-drop image component. To create this input, we use Image(type=\"pil\") which creates the component and handles the preprocessing to convert that to a PIL image.

\n\n

The output component will be a Label, which displays the top labels in a nice form. Since we don't want to show all 1,000 class labels, we will customize it to show only the top 3 classes by constructing it as Label(num_top_classes=3).

\n\n

Finally, we'll add one more parameter, the examples, which allows us to prepopulate our interfaces with a few predefined examples. The code for Gradio looks like this:

\n\n
import gradio as gr\n\ngr.Interface(fn=predict, \n             inputs=gr.Image(type=\"pil\"),\n             outputs=gr.Label(num_top_classes=3),\n             examples=[\"lion.jpg\", \"cheetah.jpg\"]).launch()\n
\n\n

This produces the following interface, which you can try right here in your browser (try uploading your own examples!):

\n\n\n\n
\n\n

And you're done! That's all the code you need to build a web demo for an image classifier. If you'd like to share with others, try setting share=True when you launch() the Interface!
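For instance, a quick sketch of sharing the demo built above (this assumes the predict function from Step 2 is defined in the same script):

```python
import gradio as gr

# Assumes `predict` from Step 2 is defined in the same script.
demo = gr.Interface(fn=predict,
                    inputs=gr.Image(type="pil"),
                    outputs=gr.Label(num_top_classes=3),
                    examples=["lion.jpg", "cheetah.jpg"])

# share=True generates a temporary public URL you can send to others.
demo.launch(share=True)
```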

\n", "tags": ["VISION", "RESNET", "PYTORCH"], "spaces": ["https://huggingface.co/spaces/abidlabs/pytorch-image-classifier", "https://huggingface.co/spaces/pytorch/ResNet", "https://huggingface.co/spaces/pytorch/ResNext", "https://huggingface.co/spaces/pytorch/SqueezeNet"], "url": "/guides/image-classification-in-pytorch/", "contributor": null}, {"name": "image-classification-in-tensorflow", "category": "integrating-other-frameworks", "pretty_category": "Integrating Other Frameworks", "guide_index": null, "absolute_index": 21, "pretty_name": "Image Classification In Tensorflow", "content": "# Image Classification in TensorFlow and Keras\n\n\n\n\n## Introduction\n\nImage classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from traffic control systems to satellite imaging. \n\nSuch models are perfect to use with Gradio's *image* input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in Python, and it will look like this (try one of the examples!):\n\n\n\n\nLet's get started!\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). We will be using a pretrained Keras image classification model, so you should also have `tensorflow` installed.\n\n## Step 1 \u2014 Setting up the Image Classification Model\n\nFirst, we will need an image classification model. For this tutorial, we will use a pretrained Mobile Net model, as it is easily downloadable from [Keras](https://keras.io/api/applications/mobilenet/). You can use a different pretrained model or train your own. \n\n```python\nimport tensorflow as tf\n\ninception_net = tf.keras.applications.MobileNetV2()\n```\n\nThis line automatically downloads the MobileNet model and weights using the Keras library. \n\n## Step 2 \u2014 Defining a `predict` function\n\nNext, we will need to define a function that takes in the *user input*, which in this case is an image, and returns the prediction. The prediction should be returned as a dictionary whose keys are class name and values are confidence probabilities. We will load the class names from this [text file](https://git.io/JJkYN).\n\nIn the case of our pretrained model, it will look like this:\n\n```python\nimport requests\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\ndef classify_image(inp):\n inp = inp.reshape((-1, 224, 224, 3))\n inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)\n prediction = inception_net.predict(inp).flatten()\n confidences = {labels[i]: float(prediction[i]) for i in range(1000)}\n return confidences\n```\n\nLet's break this down. The function takes one parameter:\n\n* `inp`: the input image as a `numpy` array\n\nThen, the function adds a batch dimension, passes it through the model, and returns:\n\n* `confidences`: the predictions, as a dictionary whose keys are class labels and whose values are confidence probabilities\n\n## Step 3 \u2014 Creating a Gradio Interface\n\nNow that we have our predictive function set up, we can create a Gradio Interface around it. \n\nIn this case, the input component is a drag-and-drop image component. 
To create this input, we can use the `\"gradio.inputs.Image\"` class, which creates the component and handles the preprocessing to convert that to a numpy array. We will instantiate the class with a parameter that automatically preprocesses the input image to be 224 pixels by 224 pixels, which is the size that MobileNet expects.\n\nThe output component will be a `\"label\"`, which displays the top labels in a nice form. Since we don't want to show all 1,000 class labels, we will customize it to show only the top 3 images.\n\nFinally, we'll add one more parameter, the `examples`, which allows us to prepopulate our interfaces with a few predefined examples. The code for Gradio looks like this:\n\n```python\nimport gradio as gr\n\ngr.Interface(fn=classify_image, \n inputs=gr.Image(shape=(224, 224)),\n outputs=gr.Label(num_top_classes=3),\n examples=[\"banana.jpg\", \"car.jpg\"]).launch()\n```\n\nThis produces the following interface, which you can try right here in your browser (try uploading your own examples!):\n\n\n\n----------\n\nAnd you're done! That's all the code you need to build a web demo for an image classifier. If you'd like to share with others, try setting `share=True` when you `launch()` the Interface!\n\n", "html": "

Image Classification in TensorFlow and Keras

\n\n

Introduction

\n\n

Image classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from traffic control systems to satellite imaging.

\n\n

Such models are perfect to use with Gradio's image input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in Python, and it will look like this (try one of the examples!):

\n\n\n\n

Let's get started!

\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed. We will be using a pretrained Keras image classification model, so you should also have tensorflow installed.

\n\n

Step 1 \u2014 Setting up the Image Classification Model

\n\n

First, we will need an image classification model. For this tutorial, we will use a pretrained MobileNet model, as it is easily downloadable from Keras. You can use a different pretrained model or train your own.

\n\n
import tensorflow as tf\n\ninception_net = tf.keras.applications.MobileNetV2()\n
\n\n

This line automatically downloads the MobileNet model and weights using the Keras library.

\n\n

Step 2 \u2014 Defining a predict function

\n\n

Next, we will need to define a function that takes in the user input, which in this case is an image, and returns the prediction. The prediction should be returned as a dictionary whose keys are class names and whose values are confidence probabilities. We will load the class names from this text file.

\n\n

In the case of our pretrained model, it will look like this:

\n\n
import requests\n\n# Download human-readable labels for ImageNet.\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\ndef classify_image(inp):\n  inp = inp.reshape((-1, 224, 224, 3))\n  inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)\n  prediction = inception_net.predict(inp).flatten()\n  confidences = {labels[i]: float(prediction[i]) for i in range(1000)}\n  return confidences\n
\n\n

Let's break this down. The function takes one parameter:

\n\n
    \n
  • inp: the input image as a numpy array
  • \n
\n\n

Then, the function adds a batch dimension, passes it through the model, and returns:

\n\n
    \n
  • confidences: the predictions, as a dictionary whose keys are class labels and whose values are confidence probabilities
  • \n
\n\n

Step 3 \u2014 Creating a Gradio Interface

\n\n

Now that we have our predictive function set up, we can create a Gradio Interface around it.

\n\n

In this case, the input component is a drag-and-drop image component. To create this input, we can use the gr.Image class, which creates the component and handles the preprocessing to convert that to a numpy array. We will instantiate the class with a parameter that automatically preprocesses the input image to be 224 pixels by 224 pixels, which is the size that MobileNet expects.

\n\n

The output component will be a Label, which displays the top labels in a nice form. Since we don't want to show all 1,000 class labels, we will customize it to show only the top 3 classes.

\n\n

Finally, we'll add one more parameter, the examples, which allows us to prepopulate our interfaces with a few predefined examples. The code for Gradio looks like this:

\n\n
import gradio as gr\n\ngr.Interface(fn=classify_image, \n             inputs=gr.Image(shape=(224, 224)),\n             outputs=gr.Label(num_top_classes=3),\n             examples=[\"banana.jpg\", \"car.jpg\"]).launch()\n
\n\n

This produces the following interface, which you can try right here in your browser (try uploading your own examples!):

\n\n\n\n
\n\n

And you're done! That's all the code you need to build a web demo for an image classifier. If you'd like to share with others, try setting share=True when you launch() the Interface!

\n", "tags": ["VISION", "MOBILENET", "TENSORFLOW"], "spaces": ["https://huggingface.co/spaces/abidlabs/keras-image-classifier"], "url": "/guides/image-classification-in-tensorflow/", "contributor": null}, {"name": "image-classification-with-vision-transformers", "category": "integrating-other-frameworks", "pretty_category": "Integrating Other Frameworks", "guide_index": null, "absolute_index": 22, "pretty_name": "Image Classification With Vision Transformers", "content": "# Image Classification with Vision Transformers\n\n\n\n\n## Introduction\n\nImage classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from facial recognition to manufacturing quality control. \n\nState-of-the-art image classifiers are based on the *transformers* architectures, originally popularized for NLP tasks. Such architectures are typically called vision transformers (ViT). Such models are perfect to use with Gradio's *image* input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in a **single line of Python**, and it will look like this (try one of the examples!):\n\n\n\n\nLet's get started!\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started).\n\n## Step 1 \u2014 Choosing a Vision Image Classification Model\n\nFirst, we will need an image classification model. For this tutorial, we will use a model from the [Hugging Face Model Hub](https://huggingface.co/models?pipeline_tag=image-classification). The Hub contains thousands of models covering dozens of different machine learning tasks. \n\nExpand the Tasks category on the left sidebar and select \"Image Classification\" as our task of interest. You will then see all of the models on the Hub that are designed to classify images.\n\nAt the time of writing, the most popular one is `google/vit-base-patch16-224`, which has been trained on ImageNet images at a resolution of 224x224 pixels. We will use this model for our demo. \n\n## Step 2 \u2014 Loading the Vision Transformer Model with Gradio\n\nWhen using a model from the Hugging Face Hub, we do not need to define the input or output components for the demo. Similarly, we do not need to be concerned with the details of preprocessing or postprocessing. \nAll of these are automatically inferred from the model tags.\n\nBesides the import statement, it only takes a single line of Python to load and launch the demo. \n\nWe use the `gr.Interface.load()` method and pass in the path to the model including the `huggingface/` to designate that it is from the Hugging Face Hub.\n\n```python\nimport gradio as gr\n\ngr.Interface.load(\n \"huggingface/google/vit-base-patch16-224\",\n examples=[\"alligator.jpg\", \"laptop.jpg\"]).launch()\n```\n\nNotice that we have added one more parameter, the `examples`, which allows us to prepopulate our interfaces with a few predefined examples. \n\nThis produces the following interface, which you can try right here in your browser. When you input an image, it is automatically preprocessed and sent to the Hugging Face Hub API, where it is passed through the model and returned as a human-interpretable prediction. Try uploading your own image!\n\n\n\n----------\n\nAnd you're done! In one line of code, you have built a web demo for an image classifier. 
If you'd like to share with others, try setting `share=True` when you `launch()` the Interface!\n\n", "html": "

Image Classification with Vision Transformers

\n\n

Introduction

\n\n

Image classification is a central task in computer vision. Building better classifiers to classify what object is present in a picture is an active area of research, as it has applications stretching from facial recognition to manufacturing quality control.

\n\n

State-of-the-art image classifiers are based on the transformers architectures, originally popularized for NLP tasks. Such architectures are typically called vision transformers (ViT). Such models are perfect to use with Gradio's image input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in a single line of Python, and it will look like this (try one of the examples!):

\n\n\n\n

Let's get started!

\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed.

\n\n

Step 1 \u2014 Choosing a Vision Image Classification Model

\n\n

First, we will need an image classification model. For this tutorial, we will use a model from the Hugging Face Model Hub. The Hub contains thousands of models covering dozens of different machine learning tasks.

\n\n

Expand the Tasks category on the left sidebar and select \"Image Classification\" as our task of interest. You will then see all of the models on the Hub that are designed to classify images.

\n\n

At the time of writing, the most popular one is google/vit-base-patch16-224, which has been trained on ImageNet images at a resolution of 224x224 pixels. We will use this model for our demo.

\n\n

Step 2 \u2014 Loading the Vision Transformer Model with Gradio

\n\n

When using a model from the Hugging Face Hub, we do not need to define the input or output components for the demo. Similarly, we do not need to be concerned with the details of preprocessing or postprocessing. \nAll of these are automatically inferred from the model tags.

\n\n

Besides the import statement, it only takes a single line of Python to load and launch the demo.

\n\n

We use the gr.Interface.load() method and pass in the path to the model, including the huggingface/ prefix, to designate that it is from the Hugging Face Hub.

\n\n
import gradio as gr\n\ngr.Interface.load(\n             \"huggingface/google/vit-base-patch16-224\",\n             examples=[\"alligator.jpg\", \"laptop.jpg\"]).launch()\n
\n\n

Notice that we have added one more parameter, the examples, which allows us to prepopulate our interfaces with a few predefined examples.

\n\n

This produces the following interface, which you can try right here in your browser. When you input an image, it is automatically preprocessed and sent to the Hugging Face Hub API, where it is passed through the model and returned as a human-interpretable prediction. Try uploading your own image!

\n\n\n\n
\n\n

And you're done! In one line of code, you have built a web demo for an image classifier. If you'd like to share with others, try setting share=True when you launch() the Interface!

\n", "tags": ["VISION", "TRANSFORMERS", "HUB"], "spaces": ["https://huggingface.co/spaces/abidlabs/vision-transformer"], "url": "/guides/image-classification-with-vision-transformers/", "contributor": null}]}, {"category": "Tabular Data Science And Plots", "guides": [{"name": "connecting-to-a-database", "category": "tabular-data-science-and-plots", "pretty_category": "Tabular Data Science And Plots", "guide_index": 1, "absolute_index": 23, "pretty_name": "Connecting To A Database", "content": "# Connecting to a Database\n\n\n\n\n## Introduction\n\nThis guide explains how you can use Gradio to connect your app to a database. We will be\nconnecting to a PostgreSQL database hosted on AWS but gradio is completely agnostic to the type of\ndatabase you are connecting to and where it's hosted. So as long as you can write python code to connect\nto your data, you can display it in a web UI with gradio \ud83d\udcaa\n\n## Overview \n \nWe will be analyzing bike share data from Chicago. The data is hosted on kaggle [here](https://www.kaggle.com/datasets/evangower/cyclistic-bike-share?select=202203-divvy-tripdata.csv).\nOur goal is to create a dashboard that will enable our business stakeholders to answer the following questions:\n\n1. Are electric bikes more popular than regular bikes?\n2. What are the top 5 most popular departure bike stations?\n\nAt the end of this guide, we will have a functioning application that looks like this:\n\n \n\n\n## Step 1 - Creating your database\n\nWe will be storing our data on a PostgreSQL hosted on Amazon's RDS service. Create an AWS account if you don't already have one\nand create a PostgreSQL database on the free tier. \n\n**Important**: If you plan to host this demo on HuggingFace Spaces, make sure database is on port **8080**. Spaces will\nblock all outgoing connections unless they are made to port 80, 443, or 8080 as noted [here](https://huggingface.co/docs/hub/spaces-overview#networking).\nRDS will not let you create a postgreSQL instance on ports 80 or 443.\n\nOnce your database is created, download the dataset from Kaggle and upload it to your database.\nFor the sake of this demo, we will only upload March 2022 data.\n\n\n## Step 2.a - Write your ETL code\nWe will be querying our database for the total count of rides split by the type of bicycle (electric, standard, or docked).\nWe will also query for the total count of rides that depart from each station and take the top 5. \n\nWe will then take the result of our queries and visualize them in with matplotlib.\n\nWe will use the pandas [read_sql](https://pandas.pydata.org/docs/reference/api/pandas.read_sql.html)\nmethod to connect to the database. This requires the `psycopg2` library to be installed. 
\n\nIn order to connect to our database, we will specify the database username, password, and host as environment variables.\nThis will make our app more secure by avoiding storing sensitive information as plain text in our application files.\n\n```python\nimport os\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nDB_USER = os.getenv(\"DB_USER\")\nDB_PASSWORD = os.getenv(\"DB_PASSWORD\")\nDB_HOST = os.getenv(\"DB_HOST\")\nPORT = 8080\nDB_NAME = \"bikeshare\"\n\nconnection_string = f\"postgresql://{DB_USER}:{DB_PASSWORD}@{DB_HOST}?port={PORT}&dbname={DB_NAME}\"\n\ndef get_count_ride_type():\n df = pd.read_sql(\n \"\"\"\n SELECT COUNT(ride_id) as n, rideable_type\n FROM rides\n GROUP BY rideable_type\n ORDER BY n DESC\n \"\"\",\n con=connection_string\n )\n fig_m, ax = plt.subplots()\n ax.bar(x=df['rideable_type'], height=df['n'])\n ax.set_title(\"Number of rides by bycycle type\")\n ax.set_ylabel(\"Number of Rides\")\n ax.set_xlabel(\"Bicycle Type\")\n return fig_m\n\n\ndef get_most_popular_stations():\n \n df = pd.read_sql(\n \"\"\"\n SELECT COUNT(ride_id) as n, MAX(start_station_name) as station\n FROM RIDES\n WHERE start_station_name is NOT NULL\n GROUP BY start_station_id\n ORDER BY n DESC\n LIMIT 5\n \"\"\",\n con=connection_string\n )\n fig_m, ax = plt.subplots()\n ax.bar(x=df['station'], height=df['n'])\n ax.set_title(\"Most popular stations\")\n ax.set_ylabel(\"Number of Rides\")\n ax.set_xlabel(\"Station Name\")\n ax.set_xticklabels(\n df['station'], rotation=45, ha=\"right\", rotation_mode=\"anchor\"\n )\n ax.tick_params(axis=\"x\", labelsize=8)\n fig_m.tight_layout()\n return fig_m\n```\n\nIf you were to run our script locally, you could pass in your credentials as environment variables like so\n\n```bash\nDB_USER='username' DB_PASSWORD='password' DB_HOST='host' python app.py\n```\n\n\n## Step 2.c - Write your gradio app\nWe will display or matplotlib plots in two separate `gr.Plot` components displayed side by side using `gr.Row()`.\nBecause we have wrapped our function to fetch the data in a `demo.load()` event trigger,\nour demo will fetch the latest data **dynamically** from the database each time the web page loads. \ud83e\ude84\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n with gr.Row():\n bike_type = gr.Plot()\n station = gr.Plot()\n\n demo.load(get_count_ride_type, inputs=None, outputs=bike_type)\n demo.load(get_most_popular_stations, inputs=None, outputs=station)\n\ndemo.launch()\n```\n\n## Step 3 - Deployment\nIf you run the code above, your app will start running locally.\nYou can even get a temporary shareable link by passing the `share=True` parameter to `launch`.\n\nBut what if you want to a permanent deployment solution?\nLet's deploy our Gradio app to the free HuggingFace Spaces platform.\n\nIf you haven't used Spaces before, follow the previous guide [here](/using_hugging_face_integrations).\nYou will have to add the `DB_USER`, `DB_PASSWORD`, and `DB_HOST` variables as \"Repo Secrets\". You can do this in the \"Settings\" tab.\n\n![secrets](https://github.com/gradio-app/gradio/blob/main/guides/assets/secrets.png?raw=true)\n\n## Conclusion\nCongratulations! You know how to connect your gradio app to a database hosted on the cloud! 
\u2601\ufe0f\n\nOur dashboard is now running on [Spaces](https://huggingface.co/spaces/gradio/chicago-bikeshare-dashboard).\nThe complete code is [here](https://huggingface.co/spaces/gradio/chicago-bikeshare-dashboard/blob/main/app.py)\n \nAs you can see, gradio gives you the power to connect to your data wherever it lives and display however you want! \ud83d\udd25", "html": "

Connecting to a Database

\n\n

Introduction

\n\n

This guide explains how you can use Gradio to connect your app to a database. We will be\nconnecting to a PostgreSQL database hosted on AWS but gradio is completely agnostic to the type of\ndatabase you are connecting to and where it's hosted. So as long as you can write python code to connect\nto your data, you can display it in a web UI with gradio \ud83d\udcaa

\n\n

Overview

\n\n

We will be analyzing bike share data from Chicago. The data is hosted on kaggle here.\nOur goal is to create a dashboard that will enable our business stakeholders to answer the following questions:

\n\n
    \n
  1. Are electric bikes more popular than regular bikes?
  2. \n
  3. What are the top 5 most popular departure bike stations?
  4. \n
\n\n

At the end of this guide, we will have a functioning application that looks like this:

\n\n

\n\n

Step 1 - Creating your database

\n\n

We will be storing our data in a PostgreSQL database hosted on Amazon's RDS service. Create an AWS account if you don't already have one\nand create a PostgreSQL database on the free tier.

\n\n

Important: If you plan to host this demo on HuggingFace Spaces, make sure the database is on port 8080. Spaces will\nblock all outgoing connections unless they are made to port 80, 443, or 8080, as noted here.\nRDS will not let you create a PostgreSQL instance on ports 80 or 443.

\n\n

Once your database is created, download the dataset from Kaggle and upload it to your database.\nFor the sake of this demo, we will only upload March 2022 data.

\n\n

Step 2.a - Write your ETL code

\n\n

We will be querying our database for the total count of rides split by the type of bicycle (electric, standard, or docked).\nWe will also query for the total count of rides that depart from each station and take the top 5.

\n\n

We will then take the result of our queries and visualize them with matplotlib.

\n\n

We will use the pandas read_sql\nmethod to connect to the database. This requires the psycopg2 library to be installed.

\n\n

In order to connect to our database, we will specify the database username, password, and host as environment variables.\nThis will make our app more secure by avoiding storing sensitive information as plain text in our application files.

\n\n
import os\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nDB_USER = os.getenv(\"DB_USER\")\nDB_PASSWORD = os.getenv(\"DB_PASSWORD\")\nDB_HOST = os.getenv(\"DB_HOST\")\nPORT = 8080\nDB_NAME = \"bikeshare\"\n\nconnection_string = f\"postgresql://{DB_USER}:{DB_PASSWORD}@{DB_HOST}?port={PORT}&dbname={DB_NAME}\"\n\ndef get_count_ride_type():\n    df = pd.read_sql(\n    \"\"\"\n        SELECT COUNT(ride_id) as n, rideable_type\n        FROM rides\n        GROUP BY rideable_type\n        ORDER BY n DESC\n    \"\"\",\n    con=connection_string\n    )\n    fig_m, ax = plt.subplots()\n    ax.bar(x=df['rideable_type'], height=df['n'])\n    ax.set_title(\"Number of rides by bycycle type\")\n    ax.set_ylabel(\"Number of Rides\")\n    ax.set_xlabel(\"Bicycle Type\")\n    return fig_m\n\n\ndef get_most_popular_stations():\n\n    df = pd.read_sql(\n        \"\"\"\n    SELECT COUNT(ride_id) as n, MAX(start_station_name) as station\n    FROM RIDES\n    WHERE start_station_name is NOT NULL\n    GROUP BY start_station_id\n    ORDER BY n DESC\n    LIMIT 5\n    \"\"\",\n    con=connection_string\n    )\n    fig_m, ax = plt.subplots()\n    ax.bar(x=df['station'], height=df['n'])\n    ax.set_title(\"Most popular stations\")\n    ax.set_ylabel(\"Number of Rides\")\n    ax.set_xlabel(\"Station Name\")\n    ax.set_xticklabels(\n        df['station'], rotation=45, ha=\"right\", rotation_mode=\"anchor\"\n    )\n    ax.tick_params(axis=\"x\", labelsize=8)\n    fig_m.tight_layout()\n    return fig_m\n
\n\n

If you were to run our script locally, you could pass in your credentials as environment variables like so

\n\n
DB_USER='username' DB_PASSWORD='password' DB_HOST='host' python app.py\n
\n\n

Step 2.c - Write your gradio app

\n\n

We will display our matplotlib plots in two separate gr.Plot components displayed side by side using gr.Row().\nBecause we have wrapped our function to fetch the data in a demo.load() event trigger,\nour demo will fetch the latest data dynamically from the database each time the web page loads. \ud83e\ude84

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        bike_type = gr.Plot()\n        station = gr.Plot()\n\n    demo.load(get_count_ride_type, inputs=None, outputs=bike_type)\n    demo.load(get_most_popular_stations, inputs=None, outputs=station)\n\ndemo.launch()\n
\n\n

Step 3 - Deployment

\n\n

If you run the code above, your app will start running locally.\nYou can even get a temporary shareable link by passing the share=True parameter to launch.

\n\n

But what if you want a permanent deployment solution?\nLet's deploy our Gradio app to the free HuggingFace Spaces platform.

\n\n

If you haven't used Spaces before, follow the previous guide here.\nYou will have to add the DB_USER, DB_PASSWORD, and DB_HOST variables as \"Repo Secrets\". You can do this in the \"Settings\" tab.

\n\n

\"secrets\"

\n\n

Conclusion

\n\n

Congratulations! You know how to connect your gradio app to a database hosted on the cloud! \u2601\ufe0f

\n\n

Our dashboard is now running on Spaces.\nThe complete code is here

\n\n

As you can see, gradio gives you the power to connect to your data wherever it lives and display however you want! \ud83d\udd25

\n", "tags": ["TABULAR", "PLOTS "], "spaces": ["https://huggingface.co/spaces/gradio/chicago-bikeshare-dashboard"], "url": "/guides/connecting-to-a-database/", "contributor": null}, {"name": "creating-a-dashboard-from-bigquery-data", "category": "tabular-data-science-and-plots", "pretty_category": "Tabular Data Science And Plots", "guide_index": null, "absolute_index": 24, "pretty_name": "Creating A Dashboard From Bigquery Data", "content": "# Creating a Real-Time Dashboard from BigQuery Data\n\n\n\n\n[Google BigQuery](https://cloud.google.com/bigquery) is a cloud-based service for processing very large data sets. It is a serverless and highly scalable data warehousing solution that enables users to analyze data [using SQL-like queries](https://www.oreilly.com/library/view/google-bigquery-the/9781492044451/ch01.html).\n\nIn this tutorial, we will show you how to query a BigQuery dataset in Python and display the data in a dashboard that updates in real time using `gradio`. The dashboard will look like this:\n\n\n\nWe'll cover the following steps in this Guide:\n\n1. Setting up your BigQuery credentials\n2. Using the BigQuery client\n3. Building the real-time dashboard (in just *7 lines of Python*)\n\nWe'll be working with the [New York Times' COVID dataset](https://www.nytimes.com/interactive/2021/us/covid-cases.html) that is available as a public dataset on BigQuery. The dataset, named `covid19_nyt.us_counties` contains the latest information about the number of confirmed cases and deaths from COVID across US counties. \n\n**Prerequisites**: This Guide uses [Gradio Blocks](/guides/quickstart/#blocks-more-flexibility-and-control), so make your are familiar with the Blocks class. \n\n## Setting up your BigQuery Credentials\n\nTo use Gradio with BigQuery, you will need to obtain your BigQuery credentials and use them with the [BigQuery Python client](https://pypi.org/project/google-cloud-bigquery/). If you already have BigQuery credentials (as a `.json` file), you can skip this section. If not, you can do this for free in just a couple of minutes.\n\n1. First, log in to your Google Cloud account and go to the Google Cloud Console (https://console.cloud.google.com/)\n\n2. In the Cloud Console, click on the hamburger menu in the top-left corner and select \"APIs & Services\" from the menu. If you do not have an existing project, you will need to create one.\n\n3. Then, click the \"+ Enabled APIs & services\" button, which allows you to enable specific services for your project. Search for \"BigQuery API\", click on it, and click the \"Enable\" button. If you see the \"Manage\" button, then the BigQuery is already enabled, and you're all set. \n\n4. In the APIs & Services menu, click on the \"Credentials\" tab and then click on the \"Create credentials\" button.\n\n5. In the \"Create credentials\" dialog, select \"Service account key\" as the type of credentials to create, and give it a name. Also grant the service account permissions by giving it a role such as \"BigQuery User\", which will allow you to run queries.\n\n6. After selecting the service account, select the \"JSON\" key type and then click on the \"Create\" button. This will download the JSON key file containing your credentials to your computer. 
It will look something like this:\n\n```json\n{\n \"type\": \"service_account\",\n \"project_id\": \"your project\",\n \"private_key_id\": \"your private key id\",\n \"private_key\": \"private key\",\n \"client_email\": \"email\",\n \"client_id\": \"client id\",\n \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n \"token_uri\": \"https://accounts.google.com/o/oauth2/token\",\n \"auth_provider_x509_cert_url\": \"https://www.googleapis.com/oauth2/v1/certs\",\n \"client_x509_cert_url\": \"https://www.googleapis.com/robot/v1/metadata/x509/email_id\"\n}\n```\n\n## Using the BigQuery Client\n\nOnce you have the credentials, you will need to use the BigQuery Python client to authenticate using your credentials. To do this, you will need to install the BigQuery Python client by running the following command in the terminal:\n\n```bash\npip install google-cloud-bigquery[pandas]\n```\n\nYou'll notice that we've installed the pandas add-on, which will be helpful for processing the BigQuery dataset as a pandas dataframe. Once the client is installed, you can authenticate using your credentials by running the following code:\n\n```py\nfrom google.cloud import bigquery\n\nclient = bigquery.Client.from_service_account_json(\"path/to/key.json\")\n```\n\nWith your credentials authenticated, you can now use the BigQuery Python client to interact with your BigQuery datasets. \n\nHere is an example of a function which queries the `covid19_nyt.us_counties` dataset in BigQuery to show the top 20 counties with the most confirmed cases as of the current day:\n\n```py\nimport numpy as np\n\nQUERY = (\n 'SELECT * FROM `bigquery-public-data.covid19_nyt.us_counties` ' \n 'ORDER BY date DESC,confirmed_cases DESC '\n 'LIMIT 20')\n\ndef run_query():\n query_job = client.query(QUERY) \n query_result = query_job.result() \n df = query_result.to_dataframe()\n # Select a subset of columns \n df = df[[\"confirmed_cases\", \"deaths\", \"county\", \"state_name\"]]\n # Convert numeric columns to standard numpy types\n df = df.astype({\"deaths\": np.int64, \"confirmed_cases\": np.int64})\n return df\n```\n\n## Building the Real-Time Dashboard\n\nOnce you have a function to query the data, you can use the `gr.DataFrame` component from the Gradio library to display the results in a tabular format. This is a useful way to inspect the data and make sure that it has been queried correctly.\n\nHere is an example of how to use the `gr.DataFrame` component to display the results. By passing in the `run_query` function to `gr.DataFrame`, we instruct Gradio to run the function as soon as the page loads and show the results. In addition, you also pass in the keyword `every` to tell the dashboard to refresh every hour (60*60 seconds).\n\n```py\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n gr.DataFrame(run_query, every=60*60)\n\ndemo.queue().launch() # Run the demo using queuing\n```\n\nPerhaps you'd like to add a visualization to our dashboard. You can use the `gr.ScatterPlot()` component to visualize the data in a scatter plot. This allows you to see the relationship between different variables such as case count and case deaths in the dataset and can be useful for exploring the data and gaining insights. Again, we can do this in real-time\nby passing in the `every` parameter. 
\n\nHere is a complete example showing how to use the `gr.ScatterPlot` to visualize in addition to displaying data with the `gr.DataFrame`\n\n```py\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n gr.Markdown(\"# \ud83d\udc89 Covid Dashboard (Updated Hourly)\")\n with gr.Row():\n gr.DataFrame(run_query, every=60*60)\n gr.ScatterPlot(run_query, every=60*60, x=\"confirmed_cases\", \n y=\"deaths\", tooltip=\"county\", width=500, height=500)\n\ndemo.queue().launch() # Run the demo with queuing enabled\n```", "html": "

Creating a Real-Time Dashboard from BigQuery Data

\n\n

Google BigQuery is a cloud-based service for processing very large data sets. It is a serverless and highly scalable data warehousing solution that enables users to analyze data using SQL-like queries.

\n\n

In this tutorial, we will show you how to query a BigQuery dataset in Python and display the data in a dashboard that updates in real time using gradio. The dashboard will look like this:

\n\n

\n\n

We'll cover the following steps in this Guide:

\n\n
  1. Setting up your BigQuery credentials
  2. Using the BigQuery client
  3. Building the real-time dashboard (in just 7 lines of Python)
\n\n

We'll be working with the New York Times' COVID dataset, which is available as a public dataset on BigQuery. The dataset, named covid19_nyt.us_counties, contains the latest information about the number of confirmed cases and deaths from COVID across US counties.

\n\n

Prerequisites: This Guide uses Gradio Blocks, so make sure you are familiar with the Blocks class.

\n\n

Setting up your BigQuery Credentials

\n\n

To use Gradio with BigQuery, you will need to obtain your BigQuery credentials and use them with the BigQuery Python client. If you already have BigQuery credentials (as a .json file), you can skip this section. If not, you can do this for free in just a couple of minutes.

\n\n
  1. First, log in to your Google Cloud account and go to the Google Cloud Console (https://console.cloud.google.com/)

  2. In the Cloud Console, click on the hamburger menu in the top-left corner and select \"APIs & Services\" from the menu. If you do not have an existing project, you will need to create one.

  3. Then, click the \"+ Enabled APIs & services\" button, which allows you to enable specific services for your project. Search for \"BigQuery API\", click on it, and click the \"Enable\" button. If you see the \"Manage\" button, then the BigQuery API is already enabled, and you're all set.

  4. In the APIs & Services menu, click on the \"Credentials\" tab and then click on the \"Create credentials\" button.

  5. In the \"Create credentials\" dialog, select \"Service account key\" as the type of credentials to create, and give it a name. Also grant the service account permissions by giving it a role such as \"BigQuery User\", which will allow you to run queries.

  6. After selecting the service account, select the \"JSON\" key type and then click on the \"Create\" button. This will download the JSON key file containing your credentials to your computer. It will look something like this:
\n\n
{\n \"type\": \"service_account\",\n \"project_id\": \"your project\",\n \"private_key_id\": \"your private key id\",\n \"private_key\": \"private key\",\n \"client_email\": \"email\",\n \"client_id\": \"client id\",\n \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n \"token_uri\": \"https://accounts.google.com/o/oauth2/token\",\n \"auth_provider_x509_cert_url\": \"https://www.googleapis.com/oauth2/v1/certs\",\n \"client_x509_cert_url\":  \"https://www.googleapis.com/robot/v1/metadata/x509/email_id\"\n}\n
\n\n

Using the BigQuery Client

\n\n

Once you have the credentials, you will need to use the BigQuery Python client to authenticate using your credentials. To do this, you will need to install the BigQuery Python client by running the following command in the terminal:

\n\n
pip install google-cloud-bigquery[pandas]\n
\n\n

You'll notice that we've installed the pandas add-on, which will be helpful for processing the BigQuery dataset as a pandas dataframe. Once the client is installed, you can authenticate using your credentials by running the following code:

\n\n
from google.cloud import bigquery\n\nclient = bigquery.Client.from_service_account_json(\"path/to/key.json\")\n
\n\n

With your credentials authenticated, you can now use the BigQuery Python client to interact with your BigQuery datasets.

\n\n

Here is an example of a function which queries the covid19_nyt.us_counties dataset in BigQuery to show the top 20 counties with the most confirmed cases as of the current day:

\n\n
import numpy as np\n\nQUERY = (\n    'SELECT * FROM `bigquery-public-data.covid19_nyt.us_counties` ' \n    'ORDER BY date DESC,confirmed_cases DESC '\n    'LIMIT 20')\n\ndef run_query():\n    query_job = client.query(QUERY)  \n    query_result = query_job.result()  \n    df = query_result.to_dataframe()\n    # Select a subset of columns \n    df = df[[\"confirmed_cases\", \"deaths\", \"county\", \"state_name\"]]\n    # Convert numeric columns to standard numpy types\n    df = df.astype({\"deaths\": np.int64, \"confirmed_cases\": np.int64})\n    return df\n
\n\n

Building the Real-Time Dashboard

\n\n

Once you have a function to query the data, you can use the gr.DataFrame component from the Gradio library to display the results in a tabular format. This is a useful way to inspect the data and make sure that it has been queried correctly.

\n\n

Here is an example of how to use the gr.DataFrame component to display the results. By passing in the run_query function to gr.DataFrame, we instruct Gradio to run the function as soon as the page loads and show the results. In addition, you also pass in the keyword every to tell the dashboard to refresh every hour (60*60 seconds).

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    gr.DataFrame(run_query, every=60*60)\n\ndemo.queue().launch()  # Run the demo using queuing\n
\n\n

Perhaps you'd like to add a visualization to our dashboard. You can use the gr.ScatterPlot() component to visualize the data in a scatter plot. This allows you to see the relationship between different variables such as case count and case deaths in the dataset and can be useful for exploring the data and gaining insights. Again, we can do this in real-time\nby passing in the every parameter.

\n\n

Here is a complete example showing how to use gr.ScatterPlot to visualize the data, in addition to displaying it with gr.DataFrame:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    gr.Markdown(\"# \ud83d\udc89 Covid Dashboard (Updated Hourly)\")\n    with gr.Row():\n        gr.DataFrame(run_query, every=60*60)\n        gr.ScatterPlot(run_query, every=60*60, x=\"confirmed_cases\", \n                        y=\"deaths\", tooltip=\"county\", width=500, height=500)\n\ndemo.queue().launch()  # Run the demo with queuing enabled\n
\n", "tags": ["TABULAR", "DASHBOARD", "PLOTS "], "spaces": [], "url": "/guides/creating-a-dashboard-from-bigquery-data/", "contributor": null}, {"name": "creating-a-dashboard-from-supabase-data", "category": "tabular-data-science-and-plots", "pretty_category": "Tabular Data Science And Plots", "guide_index": null, "absolute_index": 25, "pretty_name": "Creating A Dashboard From Supabase Data", "content": "# Create a Dashboard from Supabase Data\n\n\n\n[Supabase](https://supabase.com/) is a cloud-based open-source backend that provides a PostgreSQL database, authentication, and other useful features for building web and mobile applications. In this tutorial, you will learn how to read data from Supabase and plot it in **real-time** on a Gradio Dashboard.\n\n**Prerequisites:** To start, you will need a free Supabase account, which you can sign up for here: [https://app.supabase.com/](https://app.supabase.com/)\n\nIn this end-to-end guide, you will learn how to:\n\n* Create tables in Supabase\n* Write data to Supabase using the Supabase Python Client\n* Visualize the data in a real-time dashboard using Gradio\n\nIf you already have data on Supabase that you'd like to visualize in a dashboard, you can skip the first two sections and go directly to [visualizing the data](#visualize-the-data-in-a-real-time-gradio-dashboard)!\n\n## Create a table in Supabase\n\nFirst of all, we need some data to visualize. Following this [excellent guide](https://supabase.com/blog/loading-data-supabase-python), we'll create fake commerce data and put it in Supabase. \n\n1\\. Start by creating a new project in Supabase. Once you're logged in, click the \"New Project\" button\n\n2\\. Give your project a name and database password. You can also choose a pricing plan (for our purposes, the Free Tier is sufficient!)\n\n3\\. You'll be presented with your API keys while the database spins up (can take up to 2 minutes). \n\n4\\. Click on \"Table Editor\" (the table icon) in the left pane to create a new table. We'll create a single table called `Product`, with the following schema:\n\n
\n\n\n\n\n\n
product_id        int8
inventory_count   int8
price             float8
product_name      varchar
\n
\n\n5\\. Click Save to save the table schema. \n\n\nOur table is now ready!\n\n\n## Write data to Supabase\n\nThe next step is to write data to a Supabase dataset. We will use the Supabase Python library to do this. \n\n6\\. Install `supabase` by running the following command in your terminal:\n\n```bash\npip install supabase\n```\n\n7\\. Get your project URL and API key. Click the Settings (gear icon) on the left pane and click 'API'. The URL is listed in the Project URL box, while the API key is listed in Project API keys (with the tags `service_role`, `secret`)\n\n8\\. Now, run the following Python script to write some fake data to the table (note you have to put the values of `SUPABASE_URL` and `SUPABASE_SECRET_KEY` from step 7): \n\n```python\nimport supabase\n\n# Initialize the Supabase client\nclient = supabase.create_client('SUPABASE_URL', 'SUPABASE_SECRET_KEY')\n\n# Define the data to write\nimport random\n\nmain_list = []\nfor i in range(10):\n value = {'product_id': i, \n 'product_name': f\"Item {i}\",\n 'inventory_count': random.randint(1, 100), \n 'price': random.random()*100\n }\n main_list.append(value)\n\n# Write the data to the table\ndata = client.table('Product').insert(main_list).execute()\n```\n\nReturn to your Supabase dashboard and refresh the page, you should now see 10 rows populated in the `Product` table!\n\n## Visualize the Data in a Real-Time Gradio Dashboard\n\nFinally, we will read the data from the Supabase dataset using the same `supabase` Python library and create a realtime dashboard using `gradio`. \n\nNote: We repeat certain steps in this section (like creating the Supabase client) in case you did not go through the previous sections. As described in Step 7, you will need the project URL and API Key for your database.\n\n9\\. Write a function that loads the data from the `Product` table and returns it as a pandas Dataframe:\n\n\n```python\nimport supabase\nimport pandas as pd\n\nclient = supabase.create_client('SUPABASE_URL', 'SUPABASE_SECRET_KEY')\n\ndef read_data():\n response = client.table('Product').select(\"*\").execute()\n df = pd.DataFrame(response.data)\n return df\n```\n\n10\\. Create a small Gradio Dashboard with 2 Barplots that plots the prices and inventories of all of the items every minute and updates in real-time:\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as dashboard:\n with gr.Row():\n gr.BarPlot(read_data, x=\"product_id\", y=\"price\", title=\"Prices\", every=60)\n gr.BarPlot(read_data, x=\"product_id\", y=\"inventory_count\", title=\"Inventory\", every=60)\n\ndashboard.queue().launch()\n```\n\nNotice that by passing in a function to `gr.BarPlot()`, we have the BarPlot query the database as soon as the web app loads (and then again every 60 seconds because of the `every` parameter). Your final dashboard should look something like this:\n\n\n\n\n## Conclusion\n\nThat's it! In this tutorial, you learned how to write data to a Supabase dataset, and then read that data and plot the results as bar plots. If you update the data in the Supabase database, you'll notice that the Gradio dashboard will update within a minute. \n\nTry adding more plots and visualizations to this example (or with a different dataset) to build a more complex dashboard! ", "html": "

Create a Dashboard from Supabase Data

\n\n

Supabase is a cloud-based open-source backend that provides a PostgreSQL database, authentication, and other useful features for building web and mobile applications. In this tutorial, you will learn how to read data from Supabase and plot it in real-time on a Gradio Dashboard.

\n\n

Prerequisites: To start, you will need a free Supabase account, which you can sign up for here: https://app.supabase.com/

\n\n

In this end-to-end guide, you will learn how to:

\n\n
  • Create tables in Supabase
  • Write data to Supabase using the Supabase Python Client
  • Visualize the data in a real-time dashboard using Gradio
\n\n

If you already have data on Supabase that you'd like to visualize in a dashboard, you can skip the first two sections and go directly to visualizing the data!

\n\n

Create a table in Supabase

\n\n

First of all, we need some data to visualize. Following this excellent guide, we'll create fake commerce data and put it in Supabase.

\n\n

1. Start by creating a new project in Supabase. Once you're logged in, click the \"New Project\" button

\n\n

2. Give your project a name and database password. You can also choose a pricing plan (for our purposes, the Free Tier is sufficient!)

\n\n

3. You'll be presented with your API keys while the database spins up (can take up to 2 minutes).

\n\n

4. Click on \"Table Editor\" (the table icon) in the left pane to create a new table. We'll create a single table called Product, with the following schema:

\n\n

\n\n\n\n\n\n\n
product_id        int8
inventory_count   int8
price             float8
product_name      varchar
\n\n

\n\n

5. Click Save to save the table schema.

\n\n

Our table is now ready!

\n\n

Write data to Supabase

\n\n

The next step is to write data to a Supabase dataset. We will use the Supabase Python library to do this.

\n\n

6. Install supabase by running the following command in your terminal:

\n\n
pip install supabase\n
\n\n

7. Get your project URL and API key. Click the Settings (gear icon) on the left pane and click 'API'. The URL is listed in the Project URL box, while the API key is listed in Project API keys (with the tags service_role, secret)

\n\n

8. Now, run the following Python script to write some fake data to the table (note you have to put the values of SUPABASE_URL and SUPABASE_SECRET_KEY from step 7):

\n\n
import supabase\n\n# Initialize the Supabase client\nclient = supabase.create_client('SUPABASE_URL', 'SUPABASE_SECRET_KEY')\n\n# Define the data to write\nimport random\n\nmain_list = []\nfor i in range(10):\n    value = {'product_id': i, \n             'product_name': f\"Item {i}\",\n             'inventory_count': random.randint(1, 100), \n             'price': random.random()*100\n            }\n    main_list.append(value)\n\n# Write the data to the table\ndata = client.table('Product').insert(main_list).execute()\n
\n\n

Return to your Supabase dashboard and refresh the page; you should now see 10 rows populated in the Product table!

\n\n
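If you'd rather verify this programmatically than through the dashboard UI, here is a minimal sketch that reads the rows back with the same client (the same query pattern is used again in the next section):

```python
# Optional sanity check: read the rows back with the same Supabase client
response = client.table('Product').select('*').execute()
print(len(response.data))  # should print 10
```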

Visualize the Data in a Real-Time Gradio Dashboard

\n\n

Finally, we will read the data from the Supabase dataset using the same supabase Python library and create a realtime dashboard using gradio.

\n\n

Note: We repeat certain steps in this section (like creating the Supabase client) in case you did not go through the previous sections. As described in Step 7, you will need the project URL and API Key for your database.

\n\n

9. Write a function that loads the data from the Product table and returns it as a pandas Dataframe:

\n\n
import supabase\nimport pandas as pd\n\nclient = supabase.create_client('SUPABASE_URL', 'SUPABASE_SECRET_KEY')\n\ndef read_data():\n    response = client.table('Product').select(\"*\").execute()\n    df = pd.DataFrame(response.data)\n    return df\n
\n\n

10. Create a small Gradio Dashboard with 2 BarPlots that plot the prices and inventories of all of the items and update in real-time every minute:

\n\n
import gradio as gr\n\nwith gr.Blocks() as dashboard:\n    with gr.Row():\n        gr.BarPlot(read_data, x=\"product_id\", y=\"price\", title=\"Prices\", every=60)\n        gr.BarPlot(read_data, x=\"product_id\", y=\"inventory_count\", title=\"Inventory\", every=60)\n\ndashboard.queue().launch()\n
\n\n

Notice that by passing in a function to gr.BarPlot(), we have the BarPlot query the database as soon as the web app loads (and then again every 60 seconds because of the every parameter). Your final dashboard should look something like this:

\n\n

\n\n

Conclusion

\n\n

That's it! In this tutorial, you learned how to write data to a Supabase dataset, and then read that data and plot the results as bar plots. If you update the data in the Supabase database, you'll notice that the Gradio dashboard will update within a minute.

\n\n

Try adding more plots and visualizations to this example (or with a different dataset) to build a more complex dashboard!

\n", "tags": ["TABULAR", "DASHBOARD", "PLOTS "], "spaces": [], "url": "/guides/creating-a-dashboard-from-supabase-data/", "contributor": null}, {"name": "creating-a-realtime-dashboard-from-google-sheets", "category": "tabular-data-science-and-plots", "pretty_category": "Tabular Data Science And Plots", "guide_index": null, "absolute_index": 26, "pretty_name": "Creating A Realtime Dashboard From Google Sheets", "content": "# Creating a Real-Time Dashboard from Google Sheets\n\n\n\n[Google Sheets](https://www.google.com/sheets/about/) are an easy way to store tabular data in the form of spreadsheets. With Gradio and pandas, it's easy to read data from public or private Google Sheets and then display the data or plot it. In this blog post, we'll build a small *real-time* dashboard, one that updates when the data in the Google Sheets updates. \n\nBuilding the dashboard itself will just be 9 lines of Python code using Gradio, and our final dashboard will look like this:\n\n\n\n**Prerequisites**: This Guide uses [Gradio Blocks](/guides/quickstart/#blocks-more-flexibility-and-control), so make you are familiar with the Blocks class. \n\nThe process is a little different depending on if you are working with a publicly accessible or a private Google Sheet. We'll cover both, so let's get started!\n\n## Public Google Sheets\n\nBuilding a dashboard from a public Google Sheet is very easy, thanks to the [`pandas` library](https://pandas.pydata.org/):\n\n1\\. Get the URL of the Google Sheets that you want to use. To do this, simply go to the Google Sheets, click on the \"Share\" button in the top-right corner, and then click on the \"Get shareable link\" button. This will give you a URL that looks something like this:\n\n```html\nhttps://docs.google.com/spreadsheets/d/1UoKzzRzOCt-FXLLqDKLbryEKEgllGAQUEJ5qtmmQwpU/edit#gid=0\n```\n\n2\\. Now, let's modify this URL and then use it to read the data from the Google Sheets into a Pandas DataFrame. (In the code below, replace the `URL` variable with the URL of your public Google Sheet):\n\n```python\nimport pandas as pd\n\nURL = \"https://docs.google.com/spreadsheets/d/1UoKzzRzOCt-FXLLqDKLbryEKEgllGAQUEJ5qtmmQwpU/edit#gid=0\"\ncsv_url = URL.replace('/edit#gid=', '/export?format=csv&gid=')\n\ndef get_data():\n return pd.read_csv(csv_url)\n```\n\n3\\. The data query is a function, which means that it's easy to display it real-time using the the `gr.DataFrame` component, or plot it real-time using the `gr.LinePlot` component (of course, depending on the data, a different plot may be appropriate). To do this, just pass the function into the respective components, and set the `every` parameter based on how frequently (in seconds) you would like the component to refresh. Here's the Gradio code:\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n gr.Markdown(\"# \ud83d\udcc8 Real-Time Line Plot\")\n with gr.Row():\n with gr.Column():\n gr.DataFrame(get_data, every=5)\n with gr.Column():\n gr.LinePlot(get_data, every=5, x=\"Date\", y=\"Sales\", y_title=\"Sales ($ millions)\", overlay_point=True, width=500, height=500)\n\ndemo.queue().launch() # Run the demo with queuing enabled\n```\n \nAnd that's it! You have a dashboard that refreshes every 5 seconds, pulling the data from your Google Sheet.\n\n## Private Google Sheets\n\nFor private Google Sheets, the process requires a little more work, but not that much! 
The key difference is that now, you must authenticate yourself to authorize access to the private Google Sheets.\n\n### Authentication\n\nTo authenticate yourself, obtain credentials from Google Cloud. Here's [how to set up google cloud credentials](https://developers.google.com/workspace/guides/create-credentials):\n\n1\\. First, log in to your Google Cloud account and go to the Google Cloud Console (https://console.cloud.google.com/)\n\n2\\. In the Cloud Console, click on the hamburger menu in the top-left corner and select \"APIs & Services\" from the menu. If you do not have an existing project, you will need to create one.\n\n3\\. Then, click the \"+ Enabled APIs & services\" button, which allows you to enable specific services for your project. Search for \"Google Sheets API\", click on it, and click the \"Enable\" button. If you see the \"Manage\" button, then Google Sheets is already enabled, and you're all set. \n\n4\\. In the APIs & Services menu, click on the \"Credentials\" tab and then click on the \"Create credentials\" button.\n\n5\\. In the \"Create credentials\" dialog, select \"Service account key\" as the type of credentials to create, and give it a name. **Note down the email of the service account**\n\n6\\. After selecting the service account, select the \"JSON\" key type and then click on the \"Create\" button. This will download the JSON key file containing your credentials to your computer. It will look something like this:\n\n```json\n{\n \"type\": \"service_account\",\n \"project_id\": \"your project\",\n \"private_key_id\": \"your private key id\",\n \"private_key\": \"private key\",\n \"client_email\": \"email\",\n \"client_id\": \"client id\",\n \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n \"token_uri\": \"https://accounts.google.com/o/oauth2/token\",\n \"auth_provider_x509_cert_url\": \"https://www.googleapis.com/oauth2/v1/certs\",\n \"client_x509_cert_url\": \"https://www.googleapis.com/robot/v1/metadata/x509/email_id\"\n}\n```\n\n### Querying\n\nOnce you have the credentials `.json` file, you can use the following steps to query your Google Sheet:\n\n1\\. Click on the \"Share\" button in the top-right corner of the Google Sheet. Share the Google Sheets with the email address of the service from Step 5 of authentication subsection (this step is important!). Then click on the \"Get shareable link\" button. This will give you a URL that looks something like this:\n\n```html\nhttps://docs.google.com/spreadsheets/d/1UoKzzRzOCt-FXLLqDKLbryEKEgllGAQUEJ5qtmmQwpU/edit#gid=0\n```\n\n\n2\\. Install the [`gspread` library](https://docs.gspread.org/en/v5.7.0/), which makes it easy to work with the [Google Sheets API](https://developers.google.com/sheets/api/guides/concepts) in Python by running in the terminal: `pip install gspread`\n\n3\\. Write a function to load the data from the Google Sheet, like this (replace the `URL` variable with the URL of your private Google Sheet):\n\n```python\nimport gspread\nimport pandas as pd\n\n# Authenticate with Google and get the sheet\nURL = 'https://docs.google.com/spreadsheets/d/1_91Vps76SKOdDQ8cFxZQdgjTJiz23375sAT7vPvaj4k/edit#gid=0'\n\ngc = gspread.service_account(\"path/to/key.json\")\nsh = gc.open_by_url(URL)\nworksheet = sh.sheet1 \n\ndef get_data():\n values = worksheet.get_all_values()\n df = pd.DataFrame(values[1:], columns=values[0])\n return df\n\n```\n\n4\\. 
The data query is a function, which means that it's easy to display it real-time using the the `gr.DataFrame` component, or plot it real-time using the `gr.LinePlot` component (of course, depending on the data, a different plot may be appropriate). To do this, we just pass the function into the respective components, and set the `every` parameter based on how frequently (in seconds) we would like the component to refresh. Here's the Gradio code:\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n gr.Markdown(\"# \ud83d\udcc8 Real-Time Line Plot\")\n with gr.Row():\n with gr.Column():\n gr.DataFrame(get_data, every=5)\n with gr.Column():\n gr.LinePlot(get_data, every=5, x=\"Date\", y=\"Sales\", y_title=\"Sales ($ millions)\", overlay_point=True, width=500, height=500)\n\ndemo.queue().launch() # Run the demo with queuing enabled\n```\n \nYou now have a Dashboard that refreshes every 5 seconds, pulling the data from your Google Sheet.\n\n\n## Conclusion\n\nAnd that's all there is to it! With just a few lines of code, you can use `gradio` and other libraries to read data from a public or private Google Sheet and then display and plot the data in a real-time dashboard.\n\n\n\n", "html": "

Creating a Real-Time Dashboard from Google Sheets

\n\n

Google Sheets are an easy way to store tabular data in the form of spreadsheets. With Gradio and pandas, it's easy to read data from public or private Google Sheets and then display the data or plot it. In this blog post, we'll build a small real-time dashboard, one that updates when the data in the Google Sheets updates.

\n\n

Building the dashboard itself will just be 9 lines of Python code using Gradio, and our final dashboard will look like this:

\n\n

\n\n

Prerequisites: This Guide uses Gradio Blocks, so make sure you are familiar with the Blocks class.

\n\n

The process is a little different depending on whether you are working with a publicly accessible or a private Google Sheet. We'll cover both, so let's get started!

\n\n

Public Google Sheets

\n\n

Building a dashboard from a public Google Sheet is very easy, thanks to the pandas library:

\n\n

1. Get the URL of the Google Sheets that you want to use. To do this, simply go to the Google Sheets, click on the \"Share\" button in the top-right corner, and then click on the \"Get shareable link\" button. This will give you a URL that looks something like this:

\n\n
https://docs.google.com/spreadsheets/d/1UoKzzRzOCt-FXLLqDKLbryEKEgllGAQUEJ5qtmmQwpU/edit#gid=0\n
\n\n

2. Now, let's modify this URL and then use it to read the data from the Google Sheets into a Pandas DataFrame. (In the code below, replace the URL variable with the URL of your public Google Sheet):

\n\n
import pandas as pd\n\nURL = \"https://docs.google.com/spreadsheets/d/1UoKzzRzOCt-FXLLqDKLbryEKEgllGAQUEJ5qtmmQwpU/edit#gid=0\"\ncsv_url = URL.replace('/edit#gid=', '/export?format=csv&gid=')\n\ndef get_data():\n    return pd.read_csv(csv_url)\n
\n\n

3. The data query is a function, which means that it's easy to display it in real time using the gr.DataFrame component, or plot it in real time using the gr.LinePlot component (of course, depending on the data, a different plot may be appropriate). To do this, just pass the function into the respective components, and set the every parameter based on how frequently (in seconds) you would like the component to refresh. Here's the Gradio code:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    gr.Markdown(\"# \ud83d\udcc8 Real-Time Line Plot\")\n    with gr.Row():\n        with gr.Column():\n            gr.DataFrame(get_data, every=5)\n        with gr.Column():\n            gr.LinePlot(get_data, every=5, x=\"Date\", y=\"Sales\", y_title=\"Sales ($ millions)\", overlay_point=True, width=500, height=500)\n\ndemo.queue().launch()  # Run the demo with queuing enabled\n
\n\n

And that's it! You have a dashboard that refreshes every 5 seconds, pulling the data from your Google Sheet.

\n\n

Private Google Sheets

\n\n

For private Google Sheets, the process requires a little more work, but not that much! The key difference is that now, you must authenticate yourself to authorize access to the private Google Sheets.

\n\n

Authentication

\n\n

To authenticate yourself, obtain credentials from Google Cloud. Here's how to set up google cloud credentials:

\n\n

1. First, log in to your Google Cloud account and go to the Google Cloud Console (https://console.cloud.google.com/)

\n\n

2. In the Cloud Console, click on the hamburger menu in the top-left corner and select \"APIs & Services\" from the menu. If you do not have an existing project, you will need to create one.

\n\n

3. Then, click the \"+ Enabled APIs & services\" button, which allows you to enable specific services for your project. Search for \"Google Sheets API\", click on it, and click the \"Enable\" button. If you see the \"Manage\" button, then the Google Sheets API is already enabled, and you're all set.

\n\n

4. In the APIs & Services menu, click on the \"Credentials\" tab and then click on the \"Create credentials\" button.

\n\n

5. In the \"Create credentials\" dialog, select \"Service account key\" as the type of credentials to create, and give it a name. Note down the email of the service account

\n\n

6. After selecting the service account, select the \"JSON\" key type and then click on the \"Create\" button. This will download the JSON key file containing your credentials to your computer. It will look something like this:

\n\n
{\n \"type\": \"service_account\",\n \"project_id\": \"your project\",\n \"private_key_id\": \"your private key id\",\n \"private_key\": \"private key\",\n \"client_email\": \"email\",\n \"client_id\": \"client id\",\n \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n \"token_uri\": \"https://accounts.google.com/o/oauth2/token\",\n \"auth_provider_x509_cert_url\": \"https://www.googleapis.com/oauth2/v1/certs\",\n \"client_x509_cert_url\":  \"https://www.googleapis.com/robot/v1/metadata/x509/email_id\"\n}\n
\n\n

Querying

\n\n

Once you have the credentials .json file, you can use the following steps to query your Google Sheet:

\n\n

1. Click on the \"Share\" button in the top-right corner of the Google Sheet. Share the Google Sheet with the email address of the service account from Step 5 of the authentication subsection (this step is important!). Then click on the \"Get shareable link\" button. This will give you a URL that looks something like this:

\n\n
https://docs.google.com/spreadsheets/d/1UoKzzRzOCt-FXLLqDKLbryEKEgllGAQUEJ5qtmmQwpU/edit#gid=0\n
\n\n

2. Install the gspread library, which makes it easy to work with the Google Sheets API in Python by running in the terminal: pip install gspread

\n\n

3. Write a function to load the data from the Google Sheet, like this (replace the URL variable with the URL of your private Google Sheet):

\n\n
import gspread\nimport pandas as pd\n\n# Authenticate with Google and get the sheet\nURL = 'https://docs.google.com/spreadsheets/d/1_91Vps76SKOdDQ8cFxZQdgjTJiz23375sAT7vPvaj4k/edit#gid=0'\n\ngc = gspread.service_account(\"path/to/key.json\")\nsh = gc.open_by_url(URL)\nworksheet = sh.sheet1 \n\ndef get_data():\n    values = worksheet.get_all_values()\n    df = pd.DataFrame(values[1:], columns=values[0])\n    return df\n\n
\n\n

4. The data query is a function, which means that it's easy to display it in real time using the gr.DataFrame component, or plot it in real time using the gr.LinePlot component (of course, depending on the data, a different plot may be appropriate). To do this, we just pass the function into the respective components, and set the every parameter based on how frequently (in seconds) we would like the component to refresh. Here's the Gradio code:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    gr.Markdown(\"# \ud83d\udcc8 Real-Time Line Plot\")\n    with gr.Row():\n        with gr.Column():\n            gr.DataFrame(get_data, every=5)\n        with gr.Column():\n            gr.LinePlot(get_data, every=5, x=\"Date\", y=\"Sales\", y_title=\"Sales ($ millions)\", overlay_point=True, width=500, height=500)\n\ndemo.queue().launch()  # Run the demo with queuing enabled\n
\n\n

You now have a Dashboard that refreshes every 5 seconds, pulling the data from your Google Sheet.

\n\n

Conclusion

\n\n

And that's all there is to it! With just a few lines of code, you can use gradio and other libraries to read data from a public or private Google Sheet and then display and plot the data in a real-time dashboard.

\n", "tags": ["TABULAR", "DASHBOARD", "PLOTS "], "spaces": [], "url": "/guides/creating-a-realtime-dashboard-from-google-sheets/", "contributor": null}, {"name": "plot-component-for-maps", "category": "tabular-data-science-and-plots", "pretty_category": "Tabular Data Science And Plots", "guide_index": null, "absolute_index": 27, "pretty_name": "Plot Component For Maps", "content": "# How to Use the Plot Component for Maps\n\n\n\n## Introduction\n\nThis guide explains how you can use Gradio to plot geographical data on a map using the `gradio.Plot` component. The Gradio `Plot` component works with Matplotlib, Bokeh and Plotly. Plotly is what we will be working with in this guide. Plotly allows developers to easily create all sorts of maps with their geographical data. Take a look [here](https://plotly.com/python/maps/) for some examples.\n\n## Overview\n\nWe will be using the New York City Airbnb dataset, which is hosted on kaggle [here](https://www.kaggle.com/datasets/dgomonov/new-york-city-airbnb-open-data). I've uploaded it to the Hugging Face Hub as a dataset [here](https://huggingface.co/datasets/gradio/NYC-Airbnb-Open-Data) for easier use and download. Using this data we will plot Airbnb locations on a map output and allow filtering based on price and location. Below is the demo that we will be building. \u26a1\ufe0f\n\n\n\n## Step 1 - Loading CSV data \ud83d\udcbe\n\nLet's start by loading the Airbnb NYC data from the Hugging Face Hub.\n\n```python\nfrom datasets import load_dataset\n\ndataset = load_dataset(\"gradio/NYC-Airbnb-Open-Data\", split=\"train\")\ndf = dataset.to_pandas()\n\ndef filter_map(min_price, max_price, boroughs):\n new_df = df[(df['neighbourhood_group'].isin(boroughs)) & \n (df['price'] > min_price) & (df['price'] < max_price)]\n names = new_df[\"name\"].tolist()\n prices = new_df[\"price\"].tolist()\n text_list = [(names[i], prices[i]) for i in range(0, len(names))]\n```\n\nIn the code above, we first load the csv data into a pandas dataframe. Let's begin by defining a function that we will use as the prediction function for the gradio app. This function will accept the minimum price and maximum price range as well as the list of boroughs to filter the resulting map. We can use the passed in values (`min_price`, `max_price`, and list of `boroughs`) to filter the dataframe and create `new_df`. Next we will create `text_list` of the names and prices of each Airbnb to use as labels on the map.\n\n## Step 2 - Map Figure \ud83c\udf10\n\nPlotly makes it easy to work with maps. Let's take a look below how we can create a map figure.\n\n```python\nimport plotly.graph_objects as go\n\nfig = go.Figure(go.Scattermapbox(\n customdata=text_list,\n lat=new_df['latitude'].tolist(),\n lon=new_df['longitude'].tolist(),\n mode='markers',\n marker=go.scattermapbox.Marker(\n size=6\n ),\n hoverinfo=\"text\",\n hovertemplate='Name: %{customdata[0]}
Price: $%{customdata[1]}'\n ))\n\nfig.update_layout(\n mapbox_style=\"open-street-map\",\n hovermode='closest',\n mapbox=dict(\n bearing=0,\n center=go.layout.mapbox.Center(\n lat=40.67,\n lon=-73.90\n ),\n pitch=0,\n zoom=9\n ),\n)\n```\n\nAbove, we create a scatter plot on mapbox by passing it our list of latitudes and longitudes to plot markers. We also pass in our custom data of names and prices for additional info to appear on every marker we hover over. Next we use `update_layout` to specify other map settings such as zoom, and centering.\n\nMore info [here](https://plotly.com/python/scattermapbox/) on scatter plots using Mapbox and Plotly.\n\n## Step 3 - Gradio App \u26a1\ufe0f\n\nWe will use two `gr.Number` components and a `gr.CheckboxGroup` to allow users of our app to specify price ranges and borough locations. We will then use the `gr.Plot` component as an output for our Plotly + Mapbox map we created earlier.\n\n```python\nwith gr.Blocks() as demo:\n with gr.Column():\n with gr.Row():\n min_price = gr.Number(value=250, label=\"Minimum Price\")\n max_price = gr.Number(value=1000, label=\"Maximum Price\")\n boroughs = gr.CheckboxGroup(choices=[\"Queens\", \"Brooklyn\", \"Manhattan\", \"Bronx\", \"Staten Island\"], value=[\"Queens\", \"Brooklyn\"], label=\"Select Boroughs:\")\n btn = gr.Button(value=\"Update Filter\")\n map = gr.Plot()\n demo.load(filter_map, [min_price, max_price, boroughs], map)\n btn.click(filter_map, [min_price, max_price, boroughs], map)\n```\n\nWe layout these components using the `gr.Column` and `gr.Row` and we'll also add event triggers for when the demo first loads and when our \"Update Filter\" button is clicked in order to trigger the map to update with our new filters.\n\nThis is what the full demo code looks like:\n\n```python\nimport gradio as gr\nimport plotly.graph_objects as go\nfrom datasets import load_dataset\n\ndataset = load_dataset(\"gradio/NYC-Airbnb-Open-Data\", split=\"train\")\ndf = dataset.to_pandas()\n\ndef filter_map(min_price, max_price, boroughs):\n\n filtered_df = df[(df['neighbourhood_group'].isin(boroughs)) & \n (df['price'] > min_price) & (df['price'] < max_price)]\n names = filtered_df[\"name\"].tolist()\n prices = filtered_df[\"price\"].tolist()\n text_list = [(names[i], prices[i]) for i in range(0, len(names))]\n fig = go.Figure(go.Scattermapbox(\n customdata=text_list,\n lat=filtered_df['latitude'].tolist(),\n lon=filtered_df['longitude'].tolist(),\n mode='markers',\n marker=go.scattermapbox.Marker(\n size=6\n ),\n hoverinfo=\"text\",\n hovertemplate='Name: %{customdata[0]}
Price: $%{customdata[1]}'\n ))\n\n fig.update_layout(\n mapbox_style=\"open-street-map\",\n hovermode='closest',\n mapbox=dict(\n bearing=0,\n center=go.layout.mapbox.Center(\n lat=40.67,\n lon=-73.90\n ),\n pitch=0,\n zoom=9\n ),\n )\n\n return fig\n\nwith gr.Blocks() as demo:\n with gr.Column():\n with gr.Row():\n min_price = gr.Number(value=250, label=\"Minimum Price\")\n max_price = gr.Number(value=1000, label=\"Maximum Price\")\n boroughs = gr.CheckboxGroup(choices=[\"Queens\", \"Brooklyn\", \"Manhattan\", \"Bronx\", \"Staten Island\"], value=[\"Queens\", \"Brooklyn\"], label=\"Select Boroughs:\")\n btn = gr.Button(value=\"Update Filter\")\n map = gr.Plot().style()\n demo.load(filter_map, [min_price, max_price, boroughs], map)\n btn.click(filter_map, [min_price, max_price, boroughs], map)\n\ndemo.launch()\n```\n\n## Step 4 - Deployment \ud83e\udd17\n\nIf you run the code above, your app will start running locally.\nYou can even get a temporary shareable link by passing the `share=True` parameter to `launch`.\n\nBut what if you want to a permanent deployment solution?\nLet's deploy our Gradio app to the free HuggingFace Spaces platform.\n\nIf you haven't used Spaces before, follow the previous guide [here](/using_hugging_face_integrations).\n\n## Conclusion \ud83c\udf89\n\nAnd you're all done! That's all the code you need to build a map demo.\n\nHere's a link to the demo [Map demo](https://huggingface.co/spaces/gradio/map_airbnb) and [complete code](https://huggingface.co/spaces/gradio/map_airbnb/blob/main/run.py) (on Hugging Face Spaces)\n", "html": "

How to Use the Plot Component for Maps

\n\n

Introduction

\n\n

This guide explains how you can use Gradio to plot geographical data on a map using the gradio.Plot component. The Gradio Plot component works with Matplotlib, Bokeh and Plotly. Plotly is what we will be working with in this guide. Plotly allows developers to easily create all sorts of maps with their geographical data. Take a look here for some examples.

\n\n

Overview

\n\n

We will be using the New York City Airbnb dataset, which is hosted on kaggle here. I've uploaded it to the Hugging Face Hub as a dataset here for easier use and download. Using this data we will plot Airbnb locations on a map output and allow filtering based on price and location. Below is the demo that we will be building. \u26a1\ufe0f

\n\n

\n\n

Step 1 - Loading CSV data \ud83d\udcbe

\n\n

Let's start by loading the Airbnb NYC data from the Hugging Face Hub.

\n\n
from datasets import load_dataset\n\ndataset = load_dataset(\"gradio/NYC-Airbnb-Open-Data\", split=\"train\")\ndf = dataset.to_pandas()\n\ndef filter_map(min_price, max_price, boroughs):\n    new_df = df[(df['neighbourhood_group'].isin(boroughs)) & \n            (df['price'] > min_price) & (df['price'] < max_price)]\n    names = new_df[\"name\"].tolist()\n    prices = new_df[\"price\"].tolist()\n    text_list = [(names[i], prices[i]) for i in range(0, len(names))]\n
\n\n

In the code above, we first load the csv data into a pandas dataframe. Let's begin by defining a function that we will use as the prediction function for the gradio app. This function will accept the minimum price and maximum price range as well as the list of boroughs to filter the resulting map. We can use the passed in values (min_price, max_price, and list of boroughs) to filter the dataframe and create new_df. Next we will create text_list of the names and prices of each Airbnb to use as labels on the map.

\n\n

Step 2 - Map Figure \ud83c\udf10

\n\n

Plotly makes it easy to work with maps. Let's take a look below at how we can create a map figure.

\n\n
import plotly.graph_objects as go\n\nfig = go.Figure(go.Scattermapbox(\n            customdata=text_list,\n            lat=new_df['latitude'].tolist(),\n            lon=new_df['longitude'].tolist(),\n            mode='markers',\n            marker=go.scattermapbox.Marker(\n                size=6\n            ),\n            hoverinfo=\"text\",\n            hovertemplate='Name: %{customdata[0]}
Price: $%{customdata[1]}'\n ))\n\nfig.update_layout(\n mapbox_style=\"open-street-map\",\n hovermode='closest',\n mapbox=dict(\n bearing=0,\n center=go.layout.mapbox.Center(\n lat=40.67,\n lon=-73.90\n ),\n pitch=0,\n zoom=9\n ),\n)\n
\n\n

Above, we create a scatter plot on mapbox by passing it our list of latitudes and longitudes to plot markers. We also pass in our custom data of names and prices for additional info to appear on every marker we hover over. Next we use update_layout to specify other map settings such as zoom, and centering.

\n\n

More info here on scatter plots using Mapbox and Plotly.

\n\n

Step 3 - Gradio App \u26a1\ufe0f

\n\n

We will use two gr.Number components and a gr.CheckboxGroup to allow users of our app to specify price ranges and borough locations. We will then use the gr.Plot component as an output for our Plotly + Mapbox map we created earlier.

\n\n
with gr.Blocks() as demo:\n    with gr.Column():\n        with gr.Row():\n            min_price = gr.Number(value=250, label=\"Minimum Price\")\n            max_price = gr.Number(value=1000, label=\"Maximum Price\")\n        boroughs = gr.CheckboxGroup(choices=[\"Queens\", \"Brooklyn\", \"Manhattan\", \"Bronx\", \"Staten Island\"], value=[\"Queens\", \"Brooklyn\"], label=\"Select Boroughs:\")\n        btn = gr.Button(value=\"Update Filter\")\n        map = gr.Plot()\n    demo.load(filter_map, [min_price, max_price, boroughs], map)\n    btn.click(filter_map, [min_price, max_price, boroughs], map)\n
\n\n

We lay out these components using gr.Column and gr.Row, and we'll also add event triggers for when the demo first loads and when our \"Update Filter\" button is clicked, in order to update the map with our new filters.

\n\n

This is what the full demo code looks like:

\n\n
import gradio as gr\nimport plotly.graph_objects as go\nfrom datasets import load_dataset\n\ndataset = load_dataset(\"gradio/NYC-Airbnb-Open-Data\", split=\"train\")\ndf = dataset.to_pandas()\n\ndef filter_map(min_price, max_price, boroughs):\n\n    filtered_df = df[(df['neighbourhood_group'].isin(boroughs)) & \n          (df['price'] > min_price) & (df['price'] < max_price)]\n    names = filtered_df[\"name\"].tolist()\n    prices = filtered_df[\"price\"].tolist()\n    text_list = [(names[i], prices[i]) for i in range(0, len(names))]\n    fig = go.Figure(go.Scattermapbox(\n            customdata=text_list,\n            lat=filtered_df['latitude'].tolist(),\n            lon=filtered_df['longitude'].tolist(),\n            mode='markers',\n            marker=go.scattermapbox.Marker(\n                size=6\n            ),\n            hoverinfo=\"text\",\n            hovertemplate='Name: %{customdata[0]}
Price: $%{customdata[1]}'\n ))\n\n fig.update_layout(\n mapbox_style=\"open-street-map\",\n hovermode='closest',\n mapbox=dict(\n bearing=0,\n center=go.layout.mapbox.Center(\n lat=40.67,\n lon=-73.90\n ),\n pitch=0,\n zoom=9\n ),\n )\n\n return fig\n\nwith gr.Blocks() as demo:\n with gr.Column():\n with gr.Row():\n min_price = gr.Number(value=250, label=\"Minimum Price\")\n max_price = gr.Number(value=1000, label=\"Maximum Price\")\n boroughs = gr.CheckboxGroup(choices=[\"Queens\", \"Brooklyn\", \"Manhattan\", \"Bronx\", \"Staten Island\"], value=[\"Queens\", \"Brooklyn\"], label=\"Select Boroughs:\")\n btn = gr.Button(value=\"Update Filter\")\n map = gr.Plot().style()\n demo.load(filter_map, [min_price, max_price, boroughs], map)\n btn.click(filter_map, [min_price, max_price, boroughs], map)\n\ndemo.launch()\n
\n\n

Step 4 - Deployment \ud83e\udd17

\n\n

If you run the code above, your app will start running locally.\nYou can even get a temporary shareable link by passing the share=True parameter to launch.

\n\n
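For example, a temporary public link can be requested directly in the launch call (a minimal sketch, assuming demo is the Blocks app defined above):

```python
# Launch locally and also create a temporary shareable link
demo.launch(share=True)
```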

But what if you want a permanent deployment solution?\nLet's deploy our Gradio app to the free Hugging Face Spaces platform.

\n\n

If you haven't used Spaces before, follow the previous guide here.

\n\n

Conclusion \ud83c\udf89

\n\n

And you're all done! That's all the code you need to build a map demo.

\n\n

Here's a link to the Map demo and the complete code (on Hugging Face Spaces).

\n", "tags": ["PLOTS", "MAPS"], "spaces": [], "url": "/guides/plot-component-for-maps/", "contributor": null}, {"name": "using-gradio-for-tabular-workflows", "category": "tabular-data-science-and-plots", "pretty_category": "Tabular Data Science And Plots", "guide_index": null, "absolute_index": 28, "pretty_name": "Using Gradio For Tabular Workflows", "content": "# Using Gradio for Tabular Data Science Workflows\n\n\n\n\n## Introduction\n\nTabular data science is the most widely used domain of machine learning, with problems ranging from customer segmentation to churn prediction. Throughout various stages of the tabular data science workflow, communicating your work to stakeholders or clients can be cumbersome; which prevents data scientists from focusing on what matters, such as data analysis and model building. Data scientists can end up spending hours building a dashboard that takes in dataframe and returning plots, or returning a prediction or plot of clusters in a dataset. In this guide, we'll go through how to use `gradio` to improve your data science workflows. We will also talk about how to use `gradio` and [skops](https://skops.readthedocs.io/en/stable/) to build interfaces with only one line of code!\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started).\n\n## Let's Create a Simple Interface!\n\nWe will take a look at how we can create a simple UI that predicts failures based on product information. \n\n```python\nimport gradio as gr\nimport pandas as pd\nimport joblib\nimport datasets\n\n\ninputs = [gr.Dataframe(row_count = (2, \"dynamic\"), col_count=(4,\"dynamic\"), label=\"Input Data\", interactive=1)]\n\noutputs = [gr.Dataframe(row_count = (2, \"dynamic\"), col_count=(1, \"fixed\"), label=\"Predictions\", headers=[\"Failures\"])]\n\nmodel = joblib.load(\"model.pkl\")\n\n# we will give our dataframe as example\ndf = datasets.load_dataset(\"merve/supersoaker-failures\")\ndf = df[\"train\"].to_pandas()\n\ndef infer(input_dataframe):\n return pd.DataFrame(model.predict(input_dataframe))\n \ngr.Interface(fn = infer, inputs = inputs, outputs = outputs, examples = [[df.head(2)]]).launch()\n```\n\nLet's break down above code.\n\n* `fn`: the inference function that takes input dataframe and returns predictions.\n* `inputs`: the component we take our input with. We define our input as dataframe with 2 rows and 4 columns, which initially will look like an empty dataframe with the aforementioned shape. When the `row_count` is set to `dynamic`, you don't have to rely on the dataset you're inputting to pre-defined component.\n* `outputs`: The dataframe component that stores outputs. This UI can take single or multiple samples to infer, and returns 0 or 1 for each sample in one column, so we give `row_count` as 2 and `col_count` as 1 above. `headers` is a list made of header names for dataframe.\n* `examples`: You can either pass the input by dragging and dropping a CSV file, or a pandas DataFrame through examples, which headers will be automatically taken by the interface.\n\nWe will now create an example for a minimal data visualization dashboard. 
You can find a more comprehensive version in the related Spaces.\n\n\n\n```python\nimport gradio as gr\nimport pandas as pd\nimport datasets\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\ndf = datasets.load_dataset(\"merve/supersoaker-failures\")\ndf = df[\"train\"].to_pandas()\ndf.dropna(axis=0, inplace=True)\n\ndef plot(df):\n plt.scatter(df.measurement_13, df.measurement_15, c = df.loading,alpha=0.5)\n plt.savefig(\"scatter.png\")\n df['failure'].value_counts().plot(kind='bar')\n plt.savefig(\"bar.png\")\n sns.heatmap(df.select_dtypes(include=\"number\").corr())\n plt.savefig(\"corr.png\")\n plots = [\"corr.png\",\"scatter.png\", \"bar.png\"]\n return plots\n \ninputs = [gr.Dataframe(label=\"Supersoaker Production Data\")]\noutputs = [gr.Gallery(label=\"Profiling Dashboard\").style(grid=(1,3))]\n\ngr.Interface(plot, inputs=inputs, outputs=outputs, examples=[df.head(100)], title=\"Supersoaker Failures Analysis Dashboard\").launch()\n```\n\n\n\nWe will use the same dataset we used to train our model, but we will make a dashboard to visualize it this time. \n\n* `fn`: The function that will create plots based on data.\n* `inputs`: We use the same `Dataframe` component we used above.\n* `outputs`: The `Gallery` component is used to keep our visualizations.\n* `examples`: We will have the dataset itself as the example.\n\n## Easily load tabular data interfaces with one line of code using skops\n\n`skops` is a library built on top of `huggingface_hub` and `sklearn`. With the recent `gradio` integration of `skops`, you can build tabular data interfaces with one line of code!\n\n```python\nimport gradio as gr\n\n# title and description are optional\ntitle = \"Supersoaker Defective Product Prediction\"\ndescription = \"This model predicts Supersoaker production line failures. Drag and drop any slice from dataset or edit values as you wish in below dataframe component.\"\n\ngr.Interface.load(\"huggingface/scikit-learn/tabular-playground\", title=title, description=description).launch()\n```\n\n\n\n`sklearn` models pushed to Hugging Face Hub using `skops` include a `config.json` file that contains an example input with column names, the task being solved (that can either be `tabular-classification` or `tabular-regression`). From the task type, `gradio` constructs the `Interface` and consumes column names and the example input to build it. You can [refer to skops documentation on hosting models on Hub](https://skops.readthedocs.io/en/latest/auto_examples/plot_hf_hub.html#sphx-glr-auto-examples-plot-hf-hub-py) to learn how to push your models to Hub using `skops`.\n", "html": "

Using Gradio for Tabular Data Science Workflows

\n\n

Introduction

\n\n

Tabular data science is the most widely used domain of machine learning, with problems ranging from customer segmentation to churn prediction. Throughout various stages of the tabular data science workflow, communicating your work to stakeholders or clients can be cumbersome, which prevents data scientists from focusing on what matters, such as data analysis and model building. Data scientists can end up spending hours building a dashboard that takes in a dataframe and returns plots, or returns a prediction or a plot of clusters in a dataset. In this guide, we'll go through how to use gradio to improve your data science workflows. We will also talk about how to use gradio and skops to build interfaces with only one line of code!

\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed.

\n\n
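If it isn't installed yet, it can be added with pip, the same way the other guides install their dependencies:

```bash
pip install gradio
```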

Let's Create a Simple Interface!

\n\n

We will take a look at how we can create a simple UI that predicts failures based on product information.

\n\n
import gradio as gr\nimport pandas as pd\nimport joblib\nimport datasets\n\n\ninputs = [gr.Dataframe(row_count = (2, \"dynamic\"), col_count=(4,\"dynamic\"), label=\"Input Data\", interactive=1)]\n\noutputs = [gr.Dataframe(row_count = (2, \"dynamic\"), col_count=(1, \"fixed\"), label=\"Predictions\", headers=[\"Failures\"])]\n\nmodel = joblib.load(\"model.pkl\")\n\n# we will give our dataframe as example\ndf = datasets.load_dataset(\"merve/supersoaker-failures\")\ndf = df[\"train\"].to_pandas()\n\ndef infer(input_dataframe):\n  return pd.DataFrame(model.predict(input_dataframe))\n\ngr.Interface(fn = infer, inputs = inputs, outputs = outputs, examples = [[df.head(2)]]).launch()\n
\n\n

Let's break down the above code.

\n\n
  • fn: the inference function that takes the input dataframe and returns predictions.
  • inputs: the component we take our input with. We define our input as a dataframe with 2 rows and 4 columns, which initially will look like an empty dataframe with the aforementioned shape. When row_count is set to dynamic, you don't have to restrict the dataset you're inputting to the pre-defined component shape.
  • outputs: The dataframe component that stores the outputs. This UI can take single or multiple samples to infer, and returns 0 or 1 for each sample in one column, so we give row_count as 2 and col_count as 1 above. headers is a list of the header names for the dataframe.
  • examples: You can pass the input either by dragging and dropping a CSV file or as a pandas DataFrame through examples, whose headers will be automatically picked up by the interface.
\n\n

We will now create an example for a minimal data visualization dashboard. You can find a more comprehensive version in the related Spaces.

\n\n

\n\n
import gradio as gr\nimport pandas as pd\nimport datasets\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\ndf = datasets.load_dataset(\"merve/supersoaker-failures\")\ndf = df[\"train\"].to_pandas()\ndf.dropna(axis=0, inplace=True)\n\ndef plot(df):\n  plt.scatter(df.measurement_13, df.measurement_15, c = df.loading,alpha=0.5)\n  plt.savefig(\"scatter.png\")\n  df['failure'].value_counts().plot(kind='bar')\n  plt.savefig(\"bar.png\")\n  sns.heatmap(df.select_dtypes(include=\"number\").corr())\n  plt.savefig(\"corr.png\")\n  plots = [\"corr.png\",\"scatter.png\", \"bar.png\"]\n  return plots\n\ninputs = [gr.Dataframe(label=\"Supersoaker Production Data\")]\noutputs = [gr.Gallery(label=\"Profiling Dashboard\").style(grid=(1,3))]\n\ngr.Interface(plot, inputs=inputs, outputs=outputs, examples=[df.head(100)], title=\"Supersoaker Failures Analysis Dashboard\").launch()\n
\n\n

\n\n

We will use the same dataset we used to train our model, but we will make a dashboard to visualize it this time.

\n\n
  • fn: The function that will create plots based on data.
  • inputs: We use the same Dataframe component we used above.
  • outputs: The Gallery component is used to keep our visualizations.
  • examples: We will have the dataset itself as the example.
\n\n

Easily load tabular data interfaces with one line of code using skops

\n\n

skops is a library built on top of huggingface_hub and sklearn. With the recent gradio integration of skops, you can build tabular data interfaces with one line of code!

\n\n
import gradio as gr\n\n# title and description are optional\ntitle = \"Supersoaker Defective Product Prediction\"\ndescription = \"This model predicts Supersoaker production line failures. Drag and drop any slice from dataset or edit values as you wish in below dataframe component.\"\n\ngr.Interface.load(\"huggingface/scikit-learn/tabular-playground\", title=title, description=description).launch()\n
\n\n

\n\n

sklearn models pushed to the Hugging Face Hub using skops include a config.json file that contains an example input with column names and the task being solved (which can be either tabular-classification or tabular-regression). From the task type, gradio constructs the Interface and consumes the column names and the example input to build it. You can refer to the skops documentation on hosting models on the Hub to learn how to push your models to the Hub using skops.
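As a hedged sketch of what that push step can look like (parameter names follow skops.hub_utils at the time of writing, and the repository names below are placeholders; check the skops documentation for the exact signature):

```python
# a minimal sketch, assuming a pickled sklearn model saved as model.pkl
# and the supersoaker dataframe `df` from the example above
from skops import hub_utils

hub_utils.init(
    model="model.pkl",
    requirements=["scikit-learn"],
    dst="supersoaker-model",          # local folder that becomes the Hub repository
    task="tabular-classification",    # or "tabular-regression"
    data=df,                          # example input; column names end up in config.json
)
hub_utils.push(repo_id="your-username/supersoaker-model", source="supersoaker-model")
```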

\n", "tags": [], "spaces": ["https://huggingface.co/spaces/scikit-learn/gradio-skops-integration", "https://huggingface.co/spaces/scikit-learn/tabular-playground", "https://huggingface.co/spaces/merve/gradio-analysis-dashboard"], "url": "/guides/using-gradio-for-tabular-workflows/", "contributor": null}]}, {"category": "Client Libraries", "guides": [{"name": "getting-started-with-the-python-client", "category": "client-libraries", "pretty_category": "Client Libraries", "guide_index": 1, "absolute_index": 29, "pretty_name": "Getting Started With The Python Client", "content": "# Getting Started with the Gradio Python client \n\n\n\n\nThe Gradio Python client makes it very easy to use any Gradio app as an API. As an example, consider this [Hugging Face Space that transcribes audio files](https://huggingface.co/spaces/abidlabs/whisper) that are recorded from the microphone.\n\n![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/gradio-guides/whisper-screenshot.jpg)\n\nUsing the `gradio_client` library, we can easily use the Gradio as an API to transcribe audio files programmatically.\n\nHere's the entire code to do it:\n\n```python\nfrom gradio_client import Client\n\nclient = Client(\"abidlabs/whisper\") \nclient.predict(\"audio_sample.wav\") \n\n>> \"This is a test of the whisper speech recognition model.\"\n```\n\nThe Gradio client works with any hosted Gradio app, whether it be an image generator, a text summarizer, a stateful chatbot, a tax calculator, or anything else! The Gradio Client is mostly used with apps hosted on [Hugging Face Spaces](https://hf.space), but your app can be hosted anywhere, such as your own server.\n\n**Prerequisites**: To use the Gradio client, you do *not* need to know the `gradio` library in great detail. However, it is helpful to have general familiarity with Gradio's concepts of input and output components.\n\n## Installation\n\nIf you already have a recent version of `gradio`, then the `gradio_client` is included as a dependency. \n\nOtherwise, the lightweight `gradio_client` package can be installed from pip (or pip3) and is tested to work with Python versions 3.9 or higher:\n\n```bash\n$ pip install gradio_client\n```\n\n\n## Connecting to a running Gradio App\n\nStart by connecting instantiating a `Client` object and connecting it to a Gradio app that is running on Hugging Face Spaces or generally anywhere on the web.\n\n## Connecting to a Hugging Face Space\n\n```python\nfrom gradio_client import Client\n\nclient = Client(\"abidlabs/en2fr\") # a Space that translates from English to French\n```\n\nYou can also connect to private Spaces by passing in your HF token with the `hf_token` parameter. You can get your HF token here: https://huggingface.co/settings/tokens\n\n```python\nfrom gradio_client import Client\n\nclient = Client(\"abidlabs/my-private-space\", hf_token=\"...\") \n```\n\n\n## Duplicating a Space for private use\n\nWhile you can use any public Space as an API, you may get rate limited by Hugging Face if you make too many requests. For unlimited usage of a Space, simply duplicate the Space to create a private Space,\nand then use it to make as many requests as you'd like! 
\n\nThe `gradio_client` includes a class method: `Client.duplicate()` to make this process simple (you'll need to pass in your [Hugging Face token](https://huggingface.co/settings/tokens) or be logged in using the Hugging Face CLI):\n\n```python\nimport os\nfrom gradio_client import Client\n\nHF_TOKEN = os.environ.get(\"HF_TOKEN\")\n\nclient = Client.duplicate(\"abidlabs/whisper\", hf_token=HF_TOKEN) \nclient.predict(\"audio_sample.wav\") \n\n>> \"This is a test of the whisper speech recognition model.\"\n```\n\nIf you have previously duplicated a Space, re-running `duplicate()` will *not* create a new Space. Instead, the Client will attach to the previously-created Space. So it is safe to re-run the `Client.duplicate()` method multiple times. \n\n**Note:** if the original Space uses GPUs, your private Space will as well, and your Hugging Face account will get billed based on the price of the GPU. To minimize charges, your Space will automatically go to sleep after 1 hour of inactivity. You can also set the hardware using the `hardware` parameter of `duplicate()`.\n\n\n## Connecting a general Gradio app\n\nIf your app is running somewhere else, just provide the full URL instead, including the \"http://\" or \"https://\". Here's an example of making predictions to a Gradio app that is running on a share URL:\n\n```python\nfrom gradio_client import Client\n\nclient = Client(\"https://bec81a83-5b5c-471e.gradio.live\")\n```\n\n## Inspecting the API endpoints\n\nOnce you have connected to a Gradio app, you can view the APIs that are available to you by calling the `Client.view_api()` method. For the Whisper Space, we see the following:\n\n```bash\nClient.predict() Usage Info\n---------------------------\nNamed API endpoints: 1\n\n - predict(input_audio, api_name=\"/predict\") -> value_0\n Parameters:\n - [Audio] input_audio: str (filepath or URL)\n Returns:\n - [Textbox] value_0: str (value)\n```\n\nThis shows us that we have 1 API endpoint in this space, and shows us how to use the API endpoint to make a prediction: we should call the `.predict()` method (which we will explore below), providing a parameter `input_audio` of type `str`, which is a `filepath or URL`. \n\nWe should also provide the `api_name='/predict'` argument to the `predict()` method. Although this isn't necessary if a Gradio app has only 1 named endpoint, it does allow us to call different endpoints in a single app if they are available. If an app has unnamed API endpoints, these can also be displayed by running `.view_api(all_endpoints=True)`.\n\n\n## Making a prediction\n\nThe simplest way to make a prediction is simply to call the `.predict()` function with the appropriate arguments:\n\n```python\nfrom gradio_client import Client\n\nclient = Client(\"abidlabs/en2fr\", api_name='/predict')\nclient.predict(\"Hello\")\n\n>> Bonjour\n```\n\nIf there are multiple parameters, then you should pass them as separate arguments to `.predict()`, like this:\n\n\n```python\nfrom gradio_client import Client\n\nclient = Client(\"gradio/calculator\")\nclient.predict(4, \"add\", 5)\n\n>> 9.0\n```\n\nFor certain inputs, such as images, you should pass in the filepath or URL to the file. Likewise, for the corresponding output types, you will get a filepath or URL returned. \n\n```python\nfrom gradio_client import Client\n\nclient = Client(\"abidlabs/whisper\")\nclient.predict(\"https://audio-samples.github.io/samples/mp3/blizzard_unconditional/sample-0.mp3\")\n\n>> \"My thought I have nobody by a beauty and will as you poured. Mr. 
Rochester is serve in that so don't find simpus, and devoted abode, to at might in a r\u2014\"\n```\n\n\n## Running jobs asynchronously\n\nOe should note that `.predict()` is a *blocking* operation as it waits for the operation to complete before returning the prediction. \n\nIn many cases, you may be better off letting the job run in the background until you need the results of the prediction. You can do this by creating a `Job` instance using the `.submit()` method, and then later calling `.result()` on the job to get the result. For example:\n\n```python\nfrom gradio_client import Client\n\nclient = Client(space=\"abidlabs/en2fr\")\njob = client.submit(\"Hello\", api_name=\"/predict\") # This is not blocking\n\n# Do something else\n\njob.result() # This is blocking\n\n>> Bonjour\n```\n\n## Adding callbacks\n\nAlternatively, one can add one or more callbacks to perform actions after the job has completed running, like this:\n\n```python\nfrom gradio_client import Client\n\ndef print_result(x):\n print(\"The translated result is: {x}\")\n\nclient = Client(space=\"abidlabs/en2fr\")\n\njob = client.submit(\"Hello\", api_name=\"/predict\", result_callbacks=[print_result])\n\n# Do something else\n\n>> The translated result is: Bonjour\n\n```\n\n## Status\n\nThe `Job` object also allows you to get the status of the running job by calling the `.status()` method. This returns a `StatusUpdate` object with the following attributes: `code` (the status code, one of a set of defined strings representing the status. See the `utils.Status` class), `rank` (the current position of this job in the queue), `queue_size` (the total queue size), `eta` (estimated time this job will complete), `success` (a boolean representing whether the job completed successfully), and `time` (the time that the status was generated). \n\n```py\nfrom gradio_client import Client\n\nclient = Client(src=\"gradio/calculator\")\njob = client.submit(5, \"add\", 4, api_name=\"/predict\")\njob.status()\n\n>> \n```\n\n*Note*: The `Job` class also has a `.done()` instance method which returns a boolean indicating whether the job has completed.\n\n## Cancelling Jobs\n\nThe `Job` class also has a `.cancel()` instance method that cancels jobs that have been queued but not started. For example, if you run:\n\n```py\nclient = Client(\"abidlabs/whisper\") \njob1 = client.submit(\"audio_sample1.wav\") \njob2 = client.submit(\"audio_sample2.wav\") \njob1.cancel() # will return False, assuming the job has started\njob2.cancel() # will return True, indicating that the job has been canceled\n```\n\nIf the first job has started processing, then it will not be canceled. If the second job\nhas not yet started, it will be successfully canceled and removed from the queue. \n\n\n## Generator Endpoints\n\nSome Gradio API endpoints do not return a single value, rather they return a series of values. You can get the series of values that have been returned at any time from such a generator endpoint by running `job.outputs()`:\n\n```py\nfrom gradio_client import Client\n\nclient = Client(src=\"gradio/count_generator\")\njob = client.submit(3, api_name=\"/count\")\nwhile not job.done():\n time.sleep(0.1)\njob.outputs()\n\n>> ['0', '1', '2']\n```\n\nNote that running `job.result()` on a generator endpoint only gives you the *first* value returned by the endpoint. \n\nThe `Job` object is also iterable, which means you can use it to display the results of a generator function as they are returned from the endpoint. 
Here's the equivalent example using the `Job` as a generator:\n\n```py\nfrom gradio_client import Client\n\nclient = Client(src=\"gradio/count_generator\")\njob = client.submit(3, api_name=\"/count\")\n\nfor o in job:\n print(o)\n\n>> 0\n>> 1\n>> 2\n```\n\nYou can also cancel jobs that that have iterative outputs, in which case the job will finish as soon as the current iteration finishes running.\n\n```py\nfrom gradio_client import Client\nimport time\n\nclient = Client(\"abidlabs/test-yield\")\njob = client.submit(\"abcdef\")\ntime.sleep(3)\njob.cancel() # job cancels after 2 iterations\n```", "html": "

Getting Started with the Gradio Python client

\n\n

The Gradio Python client makes it very easy to use any Gradio app as an API. As an example, consider this Hugging Face Space that transcribes audio files that are recorded from the microphone.

\n\n

\"\"

\n\n

Using the gradio_client library, we can easily use the Gradio app as an API to transcribe audio files programmatically.

\n\n

Here's the entire code to do it:

\n\n
from gradio_client import Client\n\nclient = Client(\"abidlabs/whisper\") \nclient.predict(\"audio_sample.wav\")  \n\n>> \"This is a test of the whisper speech recognition model.\"\n
\n\n

The Gradio client works with any hosted Gradio app, whether it be an image generator, a text summarizer, a stateful chatbot, a tax calculator, or anything else! The Gradio Client is mostly used with apps hosted on Hugging Face Spaces, but your app can be hosted anywhere, such as your own server.

\n\n

Prerequisites: To use the Gradio client, you do not need to know the gradio library in great detail. However, it is helpful to have general familiarity with Gradio's concepts of input and output components.

\n\n

Installation

\n\n

If you already have a recent version of gradio, then the gradio_client is included as a dependency.

\n\n

Otherwise, the lightweight gradio_client package can be installed from pip (or pip3) and is tested to work with Python versions 3.9 or higher:

\n\n
$ pip install gradio_client\n
\n\n

Connecting to a running Gradio App

\n\n

Start by instantiating a Client object and connecting it to a Gradio app that is running on Hugging Face Spaces or generally anywhere on the web.

\n\n

Connecting to a Hugging Face Space

\n\n
from gradio_client import Client\n\nclient = Client(\"abidlabs/en2fr\")  # a Space that translates from English to French\n
\n\n

You can also connect to private Spaces by passing in your HF token with the hf_token parameter. You can get your HF token here: https://huggingface.co/settings/tokens

\n\n
from gradio_client import Client\n\nclient = Client(\"abidlabs/my-private-space\", hf_token=\"...\") \n
\n\n

Duplicating a Space for private use

\n\n

While you can use any public Space as an API, you may get rate limited by Hugging Face if you make too many requests. For unlimited usage of a Space, simply duplicate the Space to create a private Space,\nand then use it to make as many requests as you'd like!

\n\n

The gradio_client includes a class method: Client.duplicate() to make this process simple (you'll need to pass in your Hugging Face token or be logged in using the Hugging Face CLI):

\n\n
import os\nfrom gradio_client import Client\n\nHF_TOKEN = os.environ.get(\"HF_TOKEN\")\n\nclient = Client.duplicate(\"abidlabs/whisper\", hf_token=HF_TOKEN) \nclient.predict(\"audio_sample.wav\")  \n\n>> \"This is a test of the whisper speech recognition model.\"\n
\n\n

If you have previously duplicated a Space, re-running duplicate() will not create a new Space. Instead, the Client will attach to the previously-created Space. So it is safe to re-run the Client.duplicate() method multiple times.

\n\n

Note: if the original Space uses GPUs, your private Space will as well, and your Hugging Face account will get billed based on the price of the GPU. To minimize charges, your Space will automatically go to sleep after 1 hour of inactivity. You can also set the hardware using the hardware parameter of duplicate().
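For example, a minimal sketch of setting the hardware when duplicating (the value t4-small is only illustrative; use whichever hardware tier Hugging Face offers):

```python
import os
from gradio_client import Client

HF_TOKEN = os.environ.get("HF_TOKEN")

# Duplicate the Space onto specific hardware; "t4-small" is an illustrative value
client = Client.duplicate("abidlabs/whisper", hf_token=HF_TOKEN, hardware="t4-small")
```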

\n\n

Connecting a general Gradio app

\n\n

If your app is running somewhere else, just provide the full URL instead, including the \"http://\" or \"https://\". Here's an example of making predictions to a Gradio app that is running on a share URL:

\n\n
from gradio_client import Client\n\nclient = Client(\"https://bec81a83-5b5c-471e.gradio.live\")\n
\n\n

Inspecting the API endpoints

\n\n

Once you have connected to a Gradio app, you can view the APIs that are available to you by calling the Client.view_api() method. For the Whisper Space, we see the following:

\n\n
Client.predict() Usage Info\n---------------------------\nNamed API endpoints: 1\n\n - predict(input_audio, api_name=\"/predict\") -> value_0\n    Parameters:\n     - [Audio] input_audio: str (filepath or URL)\n    Returns:\n     - [Textbox] value_0: str (value)\n
\n\n

This shows us that we have 1 API endpoint in this Space, and how to use it to make a prediction: we should call the .predict() method (which we will explore below), providing a parameter input_audio of type str, which is a filepath or URL.

\n\n

We should also provide the api_name='/predict' argument to the predict() method. Although this isn't necessary if a Gradio app has only 1 named endpoint, it does allow us to call different endpoints in a single app if they are available. If an app has unnamed API endpoints, these can also be displayed by running .view_api(all_endpoints=True).
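For instance, to also list any unnamed endpoints:

```python
client.view_api(all_endpoints=True)
```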

\n\n

Making a prediction

\n\n

The simplest way to make a prediction is to call the .predict() function with the appropriate arguments:

\n\n
from gradio_client import Client\n\nclient = Client(\"abidlabs/en2fr\")\nclient.predict(\"Hello\", api_name=\"/predict\")\n\n>> Bonjour\n
\n\n

If there are multiple parameters, then you should pass them as separate arguments to .predict(), like this:

\n\n
from gradio_client import Client\n\nclient = Client(\"gradio/calculator\")\nclient.predict(4, \"add\", 5)\n\n>> 9.0\n
\n\n

For certain inputs, such as images, you should pass in the filepath or URL to the file. Likewise, for the corresponding output types, you will get a filepath or URL returned.

\n\n
from gradio_client import Client\n\nclient = Client(\"abidlabs/whisper\")\nclient.predict(\"https://audio-samples.github.io/samples/mp3/blizzard_unconditional/sample-0.mp3\")\n\n>> \"My thought I have nobody by a beauty and will as you poured. Mr. Rochester is serve in that so don't find simpus, and devoted abode, to at might in a r\u2014\"\n
\n\n

Running jobs asynchronously

\n\n

One should note that .predict() is a blocking operation, as it waits for the operation to complete before returning the prediction.

\n\n

In many cases, you may be better off letting the job run in the background until you need the results of the prediction. You can do this by creating a Job instance using the .submit() method, and then later calling .result() on the job to get the result. For example:

\n\n
from gradio_client import Client\n\nclient = Client(\"abidlabs/en2fr\")\njob = client.submit(\"Hello\", api_name=\"/predict\")  # This is not blocking\n\n# Do something else\n\njob.result()  # This is blocking\n\n>> Bonjour\n
\n\n

Adding callbacks

\n\n

Alternatively, one can add one or more callbacks to perform actions after the job has completed running, like this:

\n\n
from gradio_client import Client\n\ndef print_result(x):\n    print(f\"The translated result is: {x}\")\n\nclient = Client(\"abidlabs/en2fr\")\n\njob = client.submit(\"Hello\", api_name=\"/predict\", result_callbacks=[print_result])\n\n# Do something else\n\n>> The translated result is: Bonjour\n\n
\n\n

Status

\n\n

The Job object also allows you to get the status of the running job by calling the .status() method. This returns a StatusUpdate object with the following attributes: code (the status code, one of a set of defined strings representing the status. See the utils.Status class), rank (the current position of this job in the queue), queue_size (the total queue size), eta (estimated time this job will complete), success (a boolean representing whether the job completed successfully), and time (the time that the status was generated).

\n\n
from gradio_client import Client\n\nclient = Client(src=\"gradio/calculator\")\njob = client.submit(5, \"add\", 4, api_name=\"/predict\")\njob.status()\n\n>> \n
\n\n

Note: The Job class also has a .done() instance method which returns a boolean indicating whether the job has completed.

\n\n

Cancelling Jobs

\n\n

The Job class also has a .cancel() instance method that cancels jobs that have been queued but not started. For example, if you run:

\n\n
client = Client(\"abidlabs/whisper\") \njob1 = client.submit(\"audio_sample1.wav\")  \njob2 = client.submit(\"audio_sample2.wav\")  \njob1.cancel()  # will return False, assuming the job has started\njob2.cancel()  # will return True, indicating that the job has been canceled\n
\n\n

If the first job has started processing, then it will not be canceled. If the second job\nhas not yet started, it will be successfully canceled and removed from the queue.

\n\n

Generator Endpoints

\n\n

Some Gradio API endpoints do not return a single value; rather, they return a series of values. You can get the series of values that have been returned at any time from such a generator endpoint by running job.outputs():

\n\n
from gradio_client import Client\nimport time\n\nclient = Client(src=\"gradio/count_generator\")\njob = client.submit(3, api_name=\"/count\")\nwhile not job.done():\n    time.sleep(0.1)\njob.outputs()\n\n>> ['0', '1', '2']\n
\n\n

Note that running job.result() on a generator endpoint only gives you the first value returned by the endpoint.
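Continuing the count example above, calling .result() would block until the first value is available and return just that value:

```python
job.result()

>> '0'
```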

\n\n

The Job object is also iterable, which means you can use it to display the results of a generator function as they are returned from the endpoint. Here's the equivalent example using the Job as a generator:

\n\n
from gradio_client import Client\n\nclient = Client(src=\"gradio/count_generator\")\njob = client.submit(3, api_name=\"/count\")\n\nfor o in job:\n    print(o)\n\n>> 0\n>> 1\n>> 2\n
\n\n

You can also cancel jobs that have iterative outputs, in which case the job will finish as soon as the current iteration finishes running.

\n\n
from gradio_client import Client\nimport time\n\nclient = Client(\"abidlabs/test-yield\")\njob = client.submit(\"abcdef\")\ntime.sleep(3)\njob.cancel()  # job cancels after 2 iterations\n
\n", "tags": ["CLIENT", "API", "SPACES"], "spaces": [], "url": "/guides/getting-started-with-the-python-client/", "contributor": null}, {"name": "getting-started-with-the-js-client", "category": "client-libraries", "pretty_category": "Client Libraries", "guide_index": 2, "absolute_index": 30, "pretty_name": "Getting Started With The Js Client", "content": "# Getting Started with the Gradio JavaScript client\n\n\n\nThe Gradio JavaScript client makes it very easy to use any Gradio app as an API. As an example, consider this [Hugging Face Space that transcribes audio files](https://huggingface.co/spaces/abidlabs/whisper) that are recorded from the microphone.\n\n![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/gradio-guides/whisper-screenshot.jpg)\n\nUsing the `@gradio/client` library, we can easily use the Gradio as an API to transcribe audio files programmatically.\n\nHere's the entire code to do it:\n\n```js\nimport { client } from \"@gradio/client\";\n\nconst response = await fetch(\n \"https://github.com/audio-samples/audio-samples.github.io/raw/master/samples/wav/ted_speakers/SalmanKhan/sample-1.wav\"\n);\nconst audio_file = await response.blob();\n\nconst app = await client(\"abidlabs/whisper\");\nconst transcription = await app.predict(\"/predict\", [audio_file]);\n\nconsole.log(transcription.data);\n// [ \"I said the same phrase 30 times.\" ]\n```\n\nThe Gradio client works with any hosted Gradio app, whether it be an image generator, a text summarizer, a stateful chatbot, a tax calculator, or anything else! The Gradio Client is mostly used with apps hosted on [Hugging Face Spaces](https://hf.space), but your app can be hosted anywhere, such as your own server.\n\n**Prequisites**: To use the Gradio client, you do _not_ need to know the `gradio` library in great detail. However, it is helpful to have general familiarity with Gradio's concepts of input and output components.\n\n## Installation\n\nThe lightweight `@gradio/client` package can be installed from the npm registry with a package manager of your choice and support node version 18 and above:\n\n```bash\nnpm i @gradio/client\n```\n\n## Connecting to a running Gradio App\n\nStart by connecting instantiating a `client` instance and connecting it to a Gradio app that is running on Hugging Face Spaces or generally anywhere on the web.\n\n## Connecting to a Hugging Face Space\n\n```js\nimport { client } from \"@gradio/client\";\n\nconst app = client(\"abidlabs/en2fr\"); // a Space that translates from English to French\n```\n\nYou can also connect to private Spaces by passing in your HF token with the `hf_token` property of the options parameter. You can get your HF token here: https://huggingface.co/settings/tokens\n\n```js\nimport { client } from \"@gradio/client\";\n\nconst app = client(\"abidlabs/my-private-space\", { hf_token=\"hf_...\" })\n```\n\n## Duplicating a Space for private use\n\nWhile you can use any public Space as an API, you may get rate limited by Hugging Face if you make too many requests. 
For unlimited usage of a Space, simply duplicate the Space to create a private Space, and then use it to make as many requests as you'd like!\n\nThe `@gradio/client` exports another function, `duplicate`, to make this process simple (you'll need to pass in your [Hugging Face token](https://huggingface.co/settings/tokens)).\n\n`duplicate` is almost identical to `client`, the only difference is under the hood:\n\n```js\nimport { client } from \"@gradio/client\";\n\nconst response = await fetch(\n \"https://audio-samples.github.io/samples/mp3/blizzard_unconditional/sample-0.mp3\"\n);\nconst audio_file = await response.blob();\n\nconst app = await duplicate(\"abidlabs/whisper\", { hf_token: \"hf_...\" });\nconst transcription = app.predict(\"/predict\", [audio_file]);\n```\n\nIf you have previously duplicated a Space, re-running `duplicate` will _not_ create a new Space. Instead, the client will attach to the previously-created Space. So it is safe to re-run the `duplicate` method multiple times with the same space.\n\n**Note:** if the original Space uses GPUs, your private Space will as well, and your Hugging Face account will get billed based on the price of the GPU. To minimize charges, your Space will automatically go to sleep after 5 minutes of inactivity. You can also set the hardware using the `hardware` and `timeout` properties of `duplicate`'s options object like this:\n\n```js\nimport { client } from \"@gradio/client\";\n\nconst app = await duplicate(\"abidlabs/whisper\", {\n hf_token: \"hf_...\",\n timeout: 60,\n hardware: \"a10g-small\",\n});\n```\n\n## Connecting a general Gradio app\n\nIf your app is running somewhere else, just provide the full URL instead, including the \"http://\" or \"https://\". Here's an example of making predictions to a Gradio app that is running on a share URL:\n\n```js\nimport { client } from \"@gradio/client\";\n\nconst app = client(\"https://bec81a83-5b5c-471e.gradio.live\");\n```\n\n## Inspecting the API endpoints\n\nOnce you have connected to a Gradio app, you can view the APIs that are available to you by calling the `client`'s `view_api` method.\n\nFor the Whisper Space, we can do this:\n\n```js\nimport { client } from \"@gradio/client\";\n\nconst app = await client(\"abidlabs/whisper\");\n\nconst app_info = await app.view_info();\n\nconsole.log(app_info);\n```\n\nAnd we will see the following:\n\n```json\n{\n \"named_endpoints\": {\n \"/predict\": {\n \"parameters\": [\n {\n \"label\": \"text\",\n \"component\": \"Textbox\",\n \"type\": \"string\"\n }\n ],\n \"returns\": [\n {\n \"label\": \"output\",\n \"component\": \"Textbox\",\n \"type\": \"string\"\n }\n ]\n }\n },\n \"unnamed_endpoints\": {}\n}\n```\n\nThis shows us that we have 1 API endpoint in this space, and shows us how to use the API endpoint to make a prediction: we should call the `.predict()` method (which we will explore below), providing a parameter `input_audio` of type `string`, which is a url to a file.\n\nWe should also provide the `api_name='/predict'` argument to the `predict()` method. Although this isn't necessary if a Gradio app has only 1 named endpoint, it does allow us to call different endpoints in a single app if they are available. 
If an app has unnamed API endpoints, these can also be displayed by running `.view_api(all_endpoints=True)`.\n\n## Making a prediction\n\nThe simplest way to make a prediction is simply to call the `.predict()` method with the appropriate arguments:\n\n```js\nimport { client } from \"@gradio/client\";\n\nconst app = await client(\"abidlabs/en2fr\");\nconst result = await app.predict(\"/predict\", [\"Hello\"]);\n```\n\nIf there are multiple parameters, then you should pass them as an array to `.predict()`, like this:\n\n```js\nimport { client } from \"@gradio/client\";\n\nconst app = await client(\"gradio/calculator\");\nconst result = await app.predict(\"/predict\", [4, \"add\", 5]);\n```\n\nFor certain inputs, such as images, you should pass in a `Buffer`, `Blob` or `File` depending on what is most convenient. In node, this would be a `Buffer` or `Blob`; in a browser environment, this would be a `Blob` or `File`.\n\n```js\nimport { client } from \"@gradio/client\";\n\nconst response = await fetch(\n \"https://audio-samples.github.io/samples/mp3/blizzard_unconditional/sample-0.mp3\"\n);\nconst audio_file = await response.blob();\n\nconst app = await client(\"abidlabs/whisper\");\nconst result = await client.predict(\"/predict\", [audio_file]);\n```\n\n## Using events\n\nIf the API you are working with can return results over time, or you wish to access information about the status of a job, you can use the event interface for more flexibility. This is especially useful for iterative endpoints or generator endpoints that will produce a series of values over time as discreet responses.\n\n```js\nimport { client } from \"@gradio/client\";\n\nfunction log_result(payload) {\n const {\n data: [translation],\n } = payload;\n\n console.log(`The translated result is: ${translation}`);\n}\n\nconst app = await client(\"abidlabs/en2fr\");\nconst job = app.submit(\"/predict\", [\"Hello\"]);\n\njob.on(\"data\", log_result);\n```\n\n## Status\n\nThe event interface also allows you to get the status of the running job by listening to the `\"status\"` event. This returns an object with the following attributes: `status` (a human readbale status of the current job, `\"pending\" | \"generating\" | \"complete\" | \"error\"`), `code` (the detailed gradio code for the job), `position` (the current position of this job in the queue), `queue_size` (the total queue size), `eta` (estimated time this job will complete), `success` (a boolean representing whether the job completed successfully), and `time` ( as `Date` object detailing the time that the status was generated).\n\n```js\nimport { client } from \"@gradio/client\";\n\nfunction log_status(status) {\n console.log(\n `The current status for this job is: ${JSON.stringify(status, null, 2)}.`\n );\n}\n\nconst app = await client(\"abidlabs/en2fr\");\nconst job = app.submit(\"/predict\", [\"Hello\"]);\n\njob.on(\"status\", log_status);\n```\n\n## Cancelling Jobs\n\nThe job instance also has a `.cancel()` method that cancels jobs that have been queued but not started. For example, if you run:\n\n```js\nimport { client } from \"@gradio/client\";\n\nconst app = await client(\"abidlabs/en2fr\");\nconst job_one = app.submit(\"/predict\", [\"Hello\"]);\nconst job_two = app.submit(\"/predict\", [\"Friends\"]);\n\njob_one.cancel();\njob_two.cancel();\n```\n\nIf the first job has started processing, then it will not be canceled but the client will no longer listen for updates (throwing away the job). 
If the second job has not yet started, it will be successfully canceled and removed from the queue.\n\n## Generator Endpoints\n\nSome Gradio API endpoints do not return a single value, rather they return a series of values. You can listen for these values in real time using the event interface:\n\n```js\nimport { client } from \"@gradio/client\";\n\nconst app = await client(\"gradio/count_generator\");\nconst job = app.submit(0, [9]);\n\njob.on(\"data\", (data) => console.log(data));\n```\n\nThis will log out the values as they are generated by the endpoint.\n\nYou can also cancel jobs that that have iterative outputs, in which case the job will finish immediately.\n\n```js\nimport { client } from \"@gradio/client\";\n\nconst app = await client(\"gradio/count_generator\");\nconst job = app.submit(0, [9]);\n\njob.on(\"data\", (data) => console.log(data));\n\nsetTimeout(() => {\n job.cancel();\n}, 3000);\n```\n", "html": "

Getting Started with the Gradio JavaScript client

\n\n

The Gradio JavaScript client makes it very easy to use any Gradio app as an API. As an example, consider this Hugging Face Space that transcribes audio files that are recorded from the microphone.

\n\n

\"\"

\n\n

Using the @gradio/client library, we can easily use the Gradio app as an API to transcribe audio files programmatically.

\n\n

Here's the entire code to do it:

\n\n
import { client } from \"@gradio/client\";\n\nconst response = await fetch(\n  \"https://github.com/audio-samples/audio-samples.github.io/raw/master/samples/wav/ted_speakers/SalmanKhan/sample-1.wav\"\n);\nconst audio_file = await response.blob();\n\nconst app = await client(\"abidlabs/whisper\");\nconst transcription = await app.predict(\"/predict\", [audio_file]);\n\nconsole.log(transcription.data);\n// [ \"I said the same phrase 30 times.\" ]\n
\n\n

The Gradio client works with any hosted Gradio app, whether it be an image generator, a text summarizer, a stateful chatbot, a tax calculator, or anything else! The Gradio Client is mostly used with apps hosted on Hugging Face Spaces, but your app can be hosted anywhere, such as your own server.

\n\n

Prerequisites: To use the Gradio client, you do not need to know the gradio library in great detail. However, it is helpful to have general familiarity with Gradio's concepts of input and output components.

\n\n

Installation

\n\n

The lightweight @gradio/client package can be installed from the npm registry with the package manager of your choice and supports Node versions 18 and above:

\n\n
npm i @gradio/client\n
\n\n

Connecting to a running Gradio App

\n\n

Start by instantiating a client instance and connecting it to a Gradio app that is running on Hugging Face Spaces or generally anywhere on the web.

\n\n

Connecting to a Hugging Face Space

\n\n
import { client } from \"@gradio/client\";\n\nconst app = client(\"abidlabs/en2fr\"); // a Space that translates from English to French\n
\n\n

You can also connect to private Spaces by passing in your HF token with the hf_token property of the options parameter. You can get your HF token here: https://huggingface.co/settings/tokens

\n\n
import { client } from \"@gradio/client\";\n\nconst app = client(\"abidlabs/my-private-space\", { hf_token: \"hf_...\" })\n
\n\n

Duplicating a Space for private use

\n\n

While you can use any public Space as an API, you may get rate limited by Hugging Face if you make too many requests. For unlimited usage of a Space, simply duplicate the Space to create a private Space, and then use it to make as many requests as you'd like!

\n\n

The @gradio/client exports another function, duplicate, to make this process simple (you'll need to pass in your Hugging Face token).

\n\n

duplicate is almost identical to client; the only difference is what happens under the hood:

\n\n
import { duplicate } from \"@gradio/client\";\n\nconst response = await fetch(\n  \"https://audio-samples.github.io/samples/mp3/blizzard_unconditional/sample-0.mp3\"\n);\nconst audio_file = await response.blob();\n\nconst app = await duplicate(\"abidlabs/whisper\", { hf_token: \"hf_...\" });\nconst transcription = await app.predict(\"/predict\", [audio_file]);\n
\n\n

If you have previously duplicated a Space, re-running duplicate will not create a new Space. Instead, the client will attach to the previously-created Space. So it is safe to re-run the duplicate method multiple times with the same space.

\n\n

Note: if the original Space uses GPUs, your private Space will as well, and your Hugging Face account will get billed based on the price of the GPU. To minimize charges, your Space will automatically go to sleep after 5 minutes of inactivity. You can also set the hardware using the hardware and timeout properties of duplicate's options object like this:

\n\n
import { duplicate } from \"@gradio/client\";\n\nconst app = await duplicate(\"abidlabs/whisper\", {\n  hf_token: \"hf_...\",\n  timeout: 60,\n  hardware: \"a10g-small\",\n});\n
\n\n

Connecting a general Gradio app

\n\n

If your app is running somewhere else, just provide the full URL instead, including the \"http://\" or \"https://\". Here's an example of making predictions to a Gradio app that is running on a share URL:

\n\n
import { client } from \"@gradio/client\";\n\nconst app = client(\"https://bec81a83-5b5c-471e.gradio.live\");\n
\n\n

Inspecting the API endpoints

\n\n

Once you have connected to a Gradio app, you can view the APIs that are available to you by calling the client's view_api method.

\n\n

For the Whisper Space, we can do this:

\n\n
import { client } from \"@gradio/client\";\n\nconst app = await client(\"abidlabs/whisper\");\n\nconst app_info = await app.view_api();\n\nconsole.log(app_info);\n
\n\n

And we will see the following:

\n\n
{\n  \"named_endpoints\": {\n    \"/predict\": {\n      \"parameters\": [\n        {\n          \"label\": \"text\",\n          \"component\": \"Textbox\",\n          \"type\": \"string\"\n        }\n      ],\n      \"returns\": [\n        {\n          \"label\": \"output\",\n          \"component\": \"Textbox\",\n          \"type\": \"string\"\n        }\n      ]\n    }\n  },\n  \"unnamed_endpoints\": {}\n}\n
\n\n

This shows us that we have 1 API endpoint in this Space, and how to use it to make a prediction: we should call the .predict() method (which we will explore below), providing a parameter input_audio of type string, which is a URL to a file.

\n\n

We should also provide the api_name='/predict' argument to the predict() method. Although this isn't necessary if a Gradio app has only 1 named endpoint, it does allow us to call different endpoints in a single app if they are available. If an app has unnamed API endpoints, these can also be displayed by running .view_api(all_endpoints=True).

\n\n

Making a prediction

\n\n

The simplest way to make a prediction is to call the .predict() method with the appropriate arguments:

\n\n
import { client } from \"@gradio/client\";\n\nconst app = await client(\"abidlabs/en2fr\");\nconst result = await app.predict(\"/predict\", [\"Hello\"]);\n
\n\n

If there are multiple parameters, then you should pass them as an array to .predict(), like this:

\n\n
import { client } from \"@gradio/client\";\n\nconst app = await client(\"gradio/calculator\");\nconst result = await app.predict(\"/predict\", [4, \"add\", 5]);\n
\n\n

For certain inputs, such as images, you should pass in a Buffer, Blob or File depending on what is most convenient. In node, this would be a Buffer or Blob; in a browser environment, this would be a Blob or File.

\n\n
import { client } from \"@gradio/client\";\n\nconst response = await fetch(\n  \"https://audio-samples.github.io/samples/mp3/blizzard_unconditional/sample-0.mp3\"\n);\nconst audio_file = await response.blob();\n\nconst app = await client(\"abidlabs/whisper\");\nconst result = await app.predict(\"/predict\", [audio_file]);\n
\n\n

Using events

\n\n

If the API you are working with can return results over time, or you wish to access information about the status of a job, you can use the event interface for more flexibility. This is especially useful for iterative endpoints or generator endpoints that will produce a series of values over time as discrete responses.

\n\n
import { client } from \"@gradio/client\";\n\nfunction log_result(payload) {\n  const {\n    data: [translation],\n  } = payload;\n\n  console.log(`The translated result is: ${translation}`);\n}\n\nconst app = await client(\"abidlabs/en2fr\");\nconst job = app.submit(\"/predict\", [\"Hello\"]);\n\njob.on(\"data\", log_result);\n
\n\n

Status

\n\n

The event interface also allows you to get the status of the running job by listening to the \"status\" event. This returns an object with the following attributes: status (a human-readable status of the current job, \"pending\" | \"generating\" | \"complete\" | \"error\"), code (the detailed gradio code for the job), position (the current position of this job in the queue), queue_size (the total queue size), eta (estimated time this job will complete), success (a boolean representing whether the job completed successfully), and time (a Date object detailing the time that the status was generated).

\n\n
import { client } from \"@gradio/client\";\n\nfunction log_status(status) {\n  console.log(\n    `The current status for this job is: ${JSON.stringify(status, null, 2)}.`\n  );\n}\n\nconst app = await client(\"abidlabs/en2fr\");\nconst job = app.submit(\"/predict\", [\"Hello\"]);\n\njob.on(\"status\", log_status);\n
\n\n

Cancelling Jobs

\n\n

The job instance also has a .cancel() method that cancels jobs that have been queued but not started. For example, if you run:

\n\n
import { client } from \"@gradio/client\";\n\nconst app = await client(\"abidlabs/en2fr\");\nconst job_one = app.submit(\"/predict\", [\"Hello\"]);\nconst job_two = app.submit(\"/predict\", [\"Friends\"]);\n\njob_one.cancel();\njob_two.cancel();\n
\n\n

If the first job has started processing, then it will not be canceled but the client will no longer listen for updates (throwing away the job). If the second job has not yet started, it will be successfully canceled and removed from the queue.

\n\n

Generator Endpoints

\n\n

Some Gradio API endpoints do not return a single value; rather, they return a series of values. You can listen for these values in real time using the event interface:

\n\n
import { client } from \"@gradio/client\";\n\nconst app = await client(\"gradio/count_generator\");\nconst job = app.submit(0, [9]);\n\njob.on(\"data\", (data) => console.log(data));\n
\n\n

This will log out the values as they are generated by the endpoint.

\n\n

You can also cancel jobs that have iterative outputs, in which case the job will finish immediately.

\n\n
import { client } from \"@gradio/client\";\n\nconst app = await client(\"gradio/count_generator\");\nconst job = app.submit(0, [9]);\n\njob.on(\"data\", (data) => console.log(data));\n\nsetTimeout(() => {\n  job.cancel();\n}, 3000);\n
\n", "tags": ["CLIENT", "API", "SPACES"], "spaces": [], "url": "/guides/getting-started-with-the-js-client/", "contributor": null}, {"name": "fastapi-app-with-the-gradio-client", "category": "client-libraries", "pretty_category": "Client Libraries", "guide_index": null, "absolute_index": 31, "pretty_name": "Fastapi App With The Gradio Client", "content": "# Building a FastAPI App with the Gradio Python Client\n\n\n\nIn this blog post, we will demonstrate how to use the `gradio_client` [Python library](getting-started-with-the-python-client/), which enables developers to make requests to a Gradio app programmatically, by creating an example FastAPI web app. The web app we will be building is called \"Acapellify,\" and it will allow users to upload video files as input and return a version of that video without instrumental music. It will also display a gallery of generated videos.\n\n\n**Prerequisites**\n\nBefore we begin, make sure you are running Python 3.9 or later, and have the following libraries installed:\n\n* `gradio_client`\n* `fastapi`\n* `uvicorn`\n\nYou can install these libraries from `pip`: \n\n```bash\n$ pip install gradio_client fastapi uvicorn\n```\n\nYou will also need to have ffmpeg installed. You can check to see if you already have ffmpeg by running in your terminal:\n\n```bash\n$ ffmpeg version\n```\n\nOtherwise, install ffmpeg [by following these instructions](https://www.hostinger.com/tutorials/how-to-install-ffmpeg).\n\n## Step 1: Write the Video Processing Function\n\nLet's start with what seems like the most complex bit -- using machine learning to remove the music from a video. \n\nLuckily for us, there's an existing Space we can use to make this process easier: [https://huggingface.co/spaces/abidlabs/music-separation](https://huggingface.co/spaces/abidlabs/music-separation). This Space takes an audio file and produces two separate audio files: one with the instrumental music and one with all other sounds in the original clip. Perfect to use with our client! \n\nOpen a new Python file, say `main.py`, and start by importing the `Client` class from `gradio_client` and connecting it to this Space:\n\n```py\nfrom gradio_client import Client\n\nclient = Client(\"abidlabs/music-separation\")\n\ndef acapellify(audio_path):\n result = client.predict(audio_path, api_name=\"/predict\")\n return result[0]\n```\n\nThat's all the code that's needed -- notice that the API endpoints returns two audio files (one without the music, and one with just the music) in a list, and so we just return the first element of the list. \n\n---\n\n**Note**: since this is a public Space, there might be other users using this Space as well, which might result in a slow experience. You can duplicate this Space with your own [Hugging Face token](https://huggingface.co/settings/tokens) and create a private Space that only you have will have access to and bypass the queue. To do that, simply replace the first two lines above with: \n\n```py\nfrom gradio_client import Client\n\nclient = Client.duplicate(\"abidlabs/music-separation\", hf_token=YOUR_HF_TOKEN)\n```\n\nEverything else remains the same!\n\n---\n\nNow, of course, we are working with video files, so we first need to extract the audio from the video files. For this, we will be using the `ffmpeg` library, which does a lot of heavy lifting when it comes to working with audio and video files. 
The most common way to use `ffmpeg` is through the command line, which we'll call via Python's `subprocess` module:\n\nOur video processing workflow will consist of three steps: \n\n1. First, we start by taking in a video filepath and extracting the audio using `ffmpeg`. \n2. Then, we pass in the audio file through the `acapellify()` function above.\n3. Finally, we combine the new audio with the original video to produce a final acapellified video. \n\nHere's the complete code in Python, which you can add to your `main.py` file:\n\n```python\nimport subprocess\n\ndef process_video(video_path):\n old_audio = os.path.basename(video_path).split(\".\")[0] + \".m4a\"\n subprocess.run(['ffmpeg', '-y', '-i', video_path, '-vn', '-acodec', 'copy', old_audio])\n \n new_audio = acapellify(old_audio)\n \n new_video = f\"acap_{video_path}\"\n subprocess.call(['ffmpeg', '-y', '-i', video_path, '-i', new_audio, '-map', '0:v', '-map', '1:a', '-c:v', 'copy', '-c:a', 'aac', '-strict', 'experimental', f\"static/{new_video}\"])\n return new_video\n```\n\nYou can read up on [ffmpeg documentation](https://ffmpeg.org/ffmpeg.html) if you'd like to understand all of the command line parameters, as they are beyond the scope of this tutorial.\n\n## Step 2: Create a FastAPI app (Backend Routes)\n\nNext up, we'll create a simple FastAPI app. If you haven't used FastAPI before, check out [the great FastAPI docs](https://fastapi.tiangolo.com/). Otherwise, this basic template, which we add to `main.py`, will look pretty familiar:\n\n```python\nimport os\nfrom fastapi import FastAPI, File, UploadFile, Request\nfrom fastapi.responses import HTMLResponse, RedirectResponse\nfrom fastapi.staticfiles import StaticFiles\nfrom fastapi.templating import Jinja2Templates\n\napp = FastAPI()\nos.makedirs(\"static\", exist_ok=True)\napp.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\")\ntemplates = Jinja2Templates(directory=\"templates\")\n\nvideos = []\n\n@app.get(\"/\", response_class=HTMLResponse)\nasync def home(request: Request):\n return templates.TemplateResponse(\n \"home.html\", {\"request\": request, \"videos\": videos})\n\n@app.post(\"/uploadvideo/\")\nasync def upload_video(video: UploadFile = File(...)):\n new_video = process_video(video.filename)\n videos.append(new_video)\n return RedirectResponse(url='/', status_code=303)\n```\n\nIn this example, the FastAPI app has two routes: `/` and `/uploadvideo/`.\n\nThe `/` route returns an HTML template that displays a gallery of all uploaded videos. \n\nThe `/uploadvideo/` route accepts a `POST` request with an `UploadFile` object, which represents the uploaded video file. The video file is \"acapellified\" via the `process_video()` method, and the output video is stored in a list which stores all of the uploaded videos in memory.\n\nNote that this is a very basic example and if this were a production app, you will need to add more logic to handle file storage, user authentication, and security considerations. \n\n## Step 3: Create a FastAPI app (Frontend Template)\n\nFinally, we create the frontend of our web application. First, we create a folder called `templates` in the same directory as `main.py`. We then create a template, `home.html` inside the `templates` folder. 
Here is the resulting file structure:\n\n```csv\n\u251c\u2500\u2500 main.py\n\u251c\u2500\u2500 templates\n\u2502 \u2514\u2500\u2500 home.html\n```\n\nWrite the following as the contents of `home.html`:\n\n```html\n<!DOCTYPE html>\n<html>\n <head>\n <title>Video Gallery</title>\n <style>\n body {\n font-family: sans-serif;\n margin: 0;\n padding: 0;\n background-color: #f5f5f5;\n }\n h1 {\n text-align: center;\n margin-top: 30px;\n margin-bottom: 20px;\n }\n .gallery {\n display: flex;\n flex-wrap: wrap;\n justify-content: center;\n gap: 20px;\n padding: 20px;\n }\n .video {\n border: 2px solid #ccc;\n box-shadow: 0px 0px 10px rgba(0, 0, 0, 0.2);\n border-radius: 5px;\n overflow: hidden;\n width: 300px;\n margin-bottom: 20px;\n }\n .video video {\n width: 100%;\n height: 200px;\n }\n .video p {\n text-align: center;\n margin: 10px 0;\n }\n form {\n margin-top: 20px;\n text-align: center;\n }\n input[type=\"file\"] {\n display: none;\n }\n .upload-btn {\n display: inline-block;\n background-color: #3498db;\n color: #fff;\n padding: 10px 20px;\n font-size: 16px;\n border: none;\n border-radius: 5px;\n cursor: pointer;\n }\n .upload-btn:hover {\n background-color: #2980b9;\n }\n .file-name {\n margin-left: 10px;\n }\n </style>\n </head>\n <body>\n <h1>Video Gallery</h1>\n {% if videos %}\n <div class=\"gallery\">\n {% for video in videos %}\n <div class=\"video\">\n <video controls>\n <source src=\"{{ url_for('static', path=video) }}\" type=\"video/mp4\">\n Your browser does not support the video tag.\n </video>\n <p>{{ video }}</p>\n </div>\n {% endfor %}\n </div>\n {% else %}\n <p>No videos uploaded yet.</p>\n {% endif %}\n <form action=\"/uploadvideo/\" method=\"post\" enctype=\"multipart/form-data\">\n <label for=\"video-upload\" class=\"upload-btn\">Choose video file</label>\n <input type=\"file\" name=\"video\" id=\"video-upload\">\n <span class=\"file-name\"></span>\n <button type=\"submit\" class=\"upload-btn\">Upload</button>\n </form>\n <script>\n // Display selected file name in the form\n const fileUpload = document.getElementById(\"video-upload\");\n const fileName = document.querySelector(\".file-name\");\n\n fileUpload.addEventListener(\"change\", (e) => {\n fileName.textContent = e.target.files[0].name;\n });\n </script>\n </body>\n</html>\n```\n\n## Step 4: Run your FastAPI app\n\nFinally, we are ready to run our FastAPI app, powered by the Gradio Python Client!\n\nOpen up a terminal and navigate to the directory containing `main.py`. Then run the following command in the terminal:\n\n```bash\n$ uvicorn main:app\n```\n\nYou should see an output that looks like this:\n\n```csv\nLoaded as API: https://abidlabs-music-separation.hf.space \u2714\nINFO: Started server process [1360]\nINFO: Waiting for application startup.\nINFO: Application startup complete.\nINFO: Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit)\n```\n\nAnd that's it! Start uploading videos and you'll get some \"acapellified\" videos in response (might take seconds to minutes to process depending on the length of your videos). Here's how the UI looks after uploading two videos:\n\n![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/gradio-guides/acapellify.png)\n\n If you'd like to learn more about how to use the Gradio Python Client in your projects, [read the dedicated Guide](/guides/getting-started-with-the-python-client/).\n\n", "html": "

Building a FastAPI App with the Gradio Python Client

\n\n

In this blog post, we will demonstrate how to use the gradio_client Python library, which enables developers to make requests to a Gradio app programmatically, by creating an example FastAPI web app. The web app we will be building is called \"Acapellify,\" and it will allow users to upload video files as input and return a version of that video without instrumental music. It will also display a gallery of generated videos.

\n\n

Prerequisites

\n\n

Before we begin, make sure you are running Python 3.9 or later, and have the following libraries installed:

\n\n
    \n
  • gradio_client
  • \n
  • fastapi
  • \n
  • uvicorn
  • \n
\n\n

You can install these libraries from pip:

\n\n
$ pip install gradio_client fastapi uvicorn\n
\n\n

You will also need to have ffmpeg installed. You can check to see if you already have ffmpeg by running in your terminal:

\n\n
$ ffmpeg -version\n
\n\n

Otherwise, install ffmpeg by following these instructions.

\n\n

Step 1: Write the Video Processing Function

\n\n

Let's start with what seems like the most complex bit -- using machine learning to remove the music from a video.

\n\n

Luckily for us, there's an existing Space we can use to make this process easier: https://huggingface.co/spaces/abidlabs/music-separation. This Space takes an audio file and produces two separate audio files: one with the instrumental music and one with all other sounds in the original clip. Perfect to use with our client!

\n\n

Open a new Python file, say main.py, and start by importing the Client class from gradio_client and connecting it to this Space:

\n\n
from gradio_client import Client\n\nclient = Client(\"abidlabs/music-separation\")\n\ndef acapellify(audio_path):\n    result = client.predict(audio_path, api_name=\"/predict\")\n    return result[0]\n
\n\n

That's all the code that's needed -- notice that the API endpoint returns two audio files (one without the music, and one with just the music) in a list, so we just return the first element of the list.

\n\n
\n\n

Note: since this is a public Space, there might be other users using this Space as well, which might result in a slow experience. You can duplicate this Space with your own Hugging Face token and create a private Space that only you will have access to, letting you bypass the queue. To do that, simply replace the first two lines above with:

\n\n
from gradio_client import Client\n\nclient = Client.duplicate(\"abidlabs/music-separation\", hf_token=YOUR_HF_TOKEN)\n
\n\n

Everything else remains the same!

\n\n
\n\n

Now, of course, we are working with video files, so we first need to extract the audio from the video files. For this, we will be using the ffmpeg library, which does a lot of heavy lifting when it comes to working with audio and video files. The most common way to use ffmpeg is through the command line, which we'll call via Python's subprocess module:

\n\n

Our video processing workflow will consist of three steps:

\n\n
    \n
  1. First, we start by taking in a video filepath and extracting the audio using ffmpeg.
  2. \n
  3. Then, we pass in the audio file through the acapellify() function above.
  4. \n
  5. Finally, we combine the new audio with the original video to produce a final acapellified video.
  6. \n
\n\n

Here's the complete code in Python, which you can add to your main.py file:

\n\n
import os\nimport subprocess\n\ndef process_video(video_path):\n    old_audio = os.path.basename(video_path).split(\".\")[0] + \".m4a\"\n    subprocess.run(['ffmpeg', '-y', '-i', video_path, '-vn', '-acodec', 'copy', old_audio])\n\n    new_audio = acapellify(old_audio)\n\n    new_video = f\"acap_{video_path}\"\n    subprocess.call(['ffmpeg', '-y', '-i', video_path, '-i', new_audio, '-map', '0:v', '-map', '1:a', '-c:v', 'copy', '-c:a', 'aac', '-strict', 'experimental', f\"static/{new_video}\"])\n    return new_video\n
\n\n

You can read up on ffmpeg documentation if you'd like to understand all of the command line parameters, as they are beyond the scope of this tutorial.

\n\n

Step 2: Create a FastAPI app (Backend Routes)

\n\n

Next up, we'll create a simple FastAPI app. If you haven't used FastAPI before, check out the great FastAPI docs. Otherwise, this basic template, which we add to main.py, will look pretty familiar:

\n\n
import os\nfrom fastapi import FastAPI, File, UploadFile, Request\nfrom fastapi.responses import HTMLResponse, RedirectResponse\nfrom fastapi.staticfiles import StaticFiles\nfrom fastapi.templating import Jinja2Templates\n\napp = FastAPI()\nos.makedirs(\"static\", exist_ok=True)\napp.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\")\ntemplates = Jinja2Templates(directory=\"templates\")\n\nvideos = []\n\n@app.get(\"/\", response_class=HTMLResponse)\nasync def home(request: Request):\n    return templates.TemplateResponse(\n        \"home.html\", {\"request\": request, \"videos\": videos})\n\n@app.post(\"/uploadvideo/\")\nasync def upload_video(video: UploadFile = File(...)):\n    new_video = process_video(video.filename)\n    videos.append(new_video)\n    return RedirectResponse(url='/', status_code=303)\n
\n\n

In this example, the FastAPI app has two routes: / and /uploadvideo/.

\n\n

The / route returns an HTML template that displays a gallery of all uploaded videos.

\n\n

The /uploadvideo/ route accepts a POST request with an UploadFile object, which represents the uploaded video file. The video file is \"acapellified\" via the process_video() function, and the output video is appended to a list that keeps all of the uploaded videos in memory.

\n\n

Note that this is a very basic example; if this were a production app, you would need to add more logic to handle file storage, user authentication, and security considerations.

\n\n
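To make the file-storage point concrete: the route above passes video.filename straight to process_video(), which only works if that file already exists in the working directory, since ffmpeg reads it from disk. Below is a hedged sketch of a slightly more robust version of the same route that writes the uploaded bytes to disk first; the use of shutil and the exact filename handling are illustrative choices, not part of the original demo.

```python
import shutil
from pathlib import Path

@app.post("/uploadvideo/")
async def upload_video(video: UploadFile = File(...)):
    # Persist the uploaded bytes locally before processing; process_video()
    # runs ffmpeg on a path, so the file must actually exist on disk.
    filename = Path(video.filename).name  # strip any client-supplied directories
    with open(filename, "wb") as f:
        shutil.copyfileobj(video.file, f)
    new_video = process_video(filename)
    videos.append(new_video)
    return RedirectResponse(url="/", status_code=303)
```

This would replace the /uploadvideo/ route defined earlier and reuses the imports and the videos list already present in main.py.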

Step 3: Create a FastAPI app (Frontend Template)

\n\n

Finally, we create the frontend of our web application. First, we create a folder called templates in the same directory as main.py. We then create a template, home.html inside the templates folder. Here is the resulting file structure:

\n\n
\u251c\u2500\u2500 main.py\n\u251c\u2500\u2500 templates\n\u2502   \u2514\u2500\u2500 home.html\n
\n\n

Write the following as the contents of home.html:

\n\n
<!DOCTYPE html>\n<html>\n  <head>\n    <title>Video Gallery</title>\n    <style>\n      body {\n        font-family: sans-serif;\n        margin: 0;\n        padding: 0;\n        background-color: #f5f5f5;\n      }\n      h1 {\n        text-align: center;\n        margin-top: 30px;\n        margin-bottom: 20px;\n      }\n      .gallery {\n        display: flex;\n        flex-wrap: wrap;\n        justify-content: center;\n        gap: 20px;\n        padding: 20px;\n      }\n      .video {\n        border: 2px solid #ccc;\n        box-shadow: 0px 0px 10px rgba(0, 0, 0, 0.2);\n        border-radius: 5px;\n        overflow: hidden;\n        width: 300px;\n        margin-bottom: 20px;\n      }\n      .video video {\n        width: 100%;\n        height: 200px;\n      }\n      .video p {\n        text-align: center;\n        margin: 10px 0;\n      }\n      form {\n        margin-top: 20px;\n        text-align: center;\n      }\n      input[type=\"file\"] {\n        display: none;\n      }\n      .upload-btn {\n        display: inline-block;\n        background-color: #3498db;\n        color: #fff;\n        padding: 10px 20px;\n        font-size: 16px;\n        border: none;\n        border-radius: 5px;\n        cursor: pointer;\n      }\n      .upload-btn:hover {\n        background-color: #2980b9;\n      }\n      .file-name {\n        margin-left: 10px;\n      }\n    </style>\n  </head>\n  <body>\n    <h1>Video Gallery</h1>\n    {% if videos %}\n      <div class=\"gallery\">\n        {% for video in videos %}\n          <div class=\"video\">\n            <video controls>\n              <source src=\"{{ url_for('static', path=video) }}\" type=\"video/mp4\">\n              Your browser does not support the video tag.\n            </video>\n            <p>{{ video }}</p>\n          </div>\n        {% endfor %}\n      </div>\n    {% else %}\n      <p>No videos uploaded yet.</p>\n    {% endif %}\n    <form action=\"/uploadvideo/\" method=\"post\" enctype=\"multipart/form-data\">\n      <label for=\"video-upload\" class=\"upload-btn\">Choose video file</label>\n      <input type=\"file\" name=\"video\" id=\"video-upload\">\n      <span class=\"file-name\"></span>\n      <button type=\"submit\" class=\"upload-btn\">Upload</button>\n    </form>\n    <script>\n      // Display selected file name in the form\n      const fileUpload = document.getElementById(\"video-upload\");\n      const fileName = document.querySelector(\".file-name\");\n\n      fileUpload.addEventListener(\"change\", (e) => {\n        fileName.textContent = e.target.files[0].name;\n      });\n    </script>\n  </body>\n</html>\n
\n\n

Step 4: Run your FastAPI app

\n\n

Finally, we are ready to run our FastAPI app, powered by the Gradio Python Client!

\n\n

Open up a terminal and navigate to the directory containing main.py. Then run the following command in the terminal:

\n\n
$ uvicorn main:app\n
\n\n

You should see an output that looks like this:

\n\n
Loaded as API: https://abidlabs-music-separation.hf.space \u2714\nINFO:     Started server process [1360]\nINFO:     Waiting for application startup.\nINFO:     Application startup complete.\nINFO:     Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit)\n
\n\n

And that's it! Start uploading videos and you'll get some \"acapellified\" videos in response (might take seconds to minutes to process depending on the length of your videos). Here's how the UI looks after uploading two videos:

\n\n

\"\"

\n\n

If you'd like to learn more about how to use the Gradio Python Client in your projects, read the dedicated Guide.

\n", "tags": ["CLIENT", "API", "WEB APP"], "spaces": [], "url": "/guides/fastapi-app-with-the-gradio-client/", "contributor": null}, {"name": "gradio-and-llm-agents", "category": "client-libraries", "pretty_category": "Client Libraries", "guide_index": null, "absolute_index": 32, "pretty_name": "Gradio And Llm Agents", "content": "# Gradio & LLM Agents \ud83e\udd1d\n\nLarge Language Models (LLMs) are very impressive but they can be made even more powerful if we could give them skills to accomplish specialized tasks.\n\nThe [gradio_tools](https://github.com/freddyaboulton/gradio-tools) library can turn any [Gradio](https://github.com/gradio-app/gradio) application into a [tool](https://python.langchain.com/en/latest/modules/agents/tools.html) that an [agent](https://docs.langchain.com/docs/components/agents/agent) can use to complete its task. For example, an LLM could use a Gradio tool to transcribe a voice recording it finds online and then summarize it for you. Or it could use a different Gradio tool to apply OCR to a document on your Google Drive and then answer questions about it.\n\nThis guide will show how you can use `gradio_tools` to grant your LLM Agent access to the cutting edge Gradio applications hosted in the world. Although `gradio_tools` are compatible with more than one agent framework, we will focus on [Langchain Agents](https://docs.langchain.com/docs/components/agents/) in this guide.\n\n## Some background\n\n### What are agents?\n\nA [LangChain agent](https://docs.langchain.com/docs/components/agents/agent) is a Large Language Model (LLM) that takes user input and reports an output based on using one of many tools at its disposal.\n\n### What is Gradio?\n[Gradio](https://github.com/gradio-app/gradio) is the defacto standard framework for building Machine Learning Web Applications and sharing them with the world - all with just python! \ud83d\udc0d\n\n## gradio_tools - An end-to-end example\n\nTo get started with `gradio_tools`, all you need to do is import and initialize your tools and pass them to the langchain agent!\n\nIn the following example, we import the `StableDiffusionPromptGeneratorTool` to create a good prompt for stable diffusion, the\n`StableDiffusionTool` to create an image with our improved prompt, the `ImageCaptioningTool` to caption the generated image, and\nthe `TextToVideoTool` to create a video from a prompt. \n\nWe then tell our agent to create an image of a dog riding a skateboard, but to please improve our prompt ahead of time. We also ask\nit to caption the generated image and create a video for it. 
The agent can decide which tool to use without us explicitly telling it.\n\n```python\nimport os\n\nif not os.getenv(\"OPENAI_API_KEY\"):\n raise ValueError(\"OPENAI_API_KEY must be set\")\n\nfrom langchain.agents import initialize_agent\nfrom langchain.llms import OpenAI\nfrom gradio_tools import (StableDiffusionTool, ImageCaptioningTool, StableDiffusionPromptGeneratorTool,\n TextToVideoTool)\n\nfrom langchain.memory import ConversationBufferMemory\n\nllm = OpenAI(temperature=0)\nmemory = ConversationBufferMemory(memory_key=\"chat_history\")\ntools = [StableDiffusionTool().langchain, ImageCaptioningTool().langchain,\n StableDiffusionPromptGeneratorTool().langchain, TextToVideoTool().langchain]\n\n\nagent = initialize_agent(tools, llm, memory=memory, agent=\"conversational-react-description\", verbose=True)\noutput = agent.run(input=(\"Please create a photo of a dog riding a skateboard \"\n \"but improve my prompt prior to using an image generator.\"\n \"Please caption the generated image and create a video for it using the improved prompt.\"))\n```\n\nYou'll note that we are using some pre-built tools that come with `gradio_tools`. Please see this [doc](https://github.com/freddyaboulton/gradio-tools#gradio-tools-gradio--llm-agents) for a complete list of the tools that come with `gradio_tools`.\nIf you would like to use a tool that's not currently in `gradio_tools`, it is very easy to add your own. That's what the next section will cover.\n\n## gradio_tools - creating your own tool\n\nThe core abstraction is the `GradioTool`, which lets you define a new tool for your LLM as long as you implement a standard interface:\n\n```python\nclass GradioTool(BaseTool):\n\n def __init__(self, name: str, description: str, src: str) -> None:\n\n @abstractmethod\n def create_job(self, query: str) -> Job:\n pass\n\n @abstractmethod\n def postprocess(self, output: Tuple[Any] | Any) -> str:\n pass\n```\nThe requirements are:\n1. The name for your tool\n2. The description for your tool. This is crucial! Agents decide which tool to use based on their description. Be precise and be sure to include example of what the input and the output of the tool should look like.\n3. The url or space id, e.g. `freddyaboulton/calculator`, of the Gradio application. Based on this value, `gradio_tool` will create a [gradio client](https://github.com/gradio-app/gradio/blob/main/client/python/README.md) instance to query the upstream application via API. Be sure to click the link and learn more about the gradio client library if you are not familiar with it.\n4. create_job - Given a string, this method should parse that string and return a job from the client. Most times, this is as simple as passing the string to the `submit` function of the client. More info on creating jobs [here](https://github.com/gradio-app/gradio/blob/main/client/python/README.md#making-a-prediction)\n5. postprocess - Given the result of the job, convert it to a string the LLM can display to the user.\n6. *Optional* - Some libraries, e.g. [MiniChain](https://github.com/srush/MiniChain/tree/main), may need some info about the underlying gradio input and output types used by the tool. By default, this will return gr.Textbox() but \nif you'd like to provide more accurate info, implement the `_block_input(self, gr)` and `_block_output(self, gr)` methods of the tool. The `gr` variable is the gradio module (the result of `import gradio as gr`). 
It will be\nautomatically imported by the `GradiTool` parent class and passed to the `_block_input` and `_block_output` methods.\n\nAnd that's it!\n\nOnce you have created your tool, open a pull request to the `gradio_tools` repo! We welcome all contributions.\n\n## Example tool - Stable Diffusion\n\nHere is the code for the StableDiffusion tool as an example:\n\n```python\nfrom gradio_tool import GradioTool\nimport os\n\nclass StableDiffusionTool(GradioTool):\n \"\"\"Tool for calling stable diffusion from llm\"\"\"\n\n def __init__(\n self,\n name=\"StableDiffusion\",\n description=(\n \"An image generator. Use this to generate images based on \"\n \"text input. Input should be a description of what the image should \"\n \"look like. The output will be a path to an image file.\"\n ),\n src=\"gradio-client-demos/stable-diffusion\",\n hf_token=None,\n ) -> None:\n super().__init__(name, description, src, hf_token)\n\n def create_job(self, query: str) -> Job:\n return self.client.submit(query, \"\", 9, fn_index=1)\n\n def postprocess(self, output: str) -> str:\n return [os.path.join(output, i) for i in os.listdir(output) if not i.endswith(\"json\")][0]\n\n def _block_input(self, gr) -> \"gr.components.Component\":\n return gr.Textbox()\n\n def _block_output(self, gr) -> \"gr.components.Component\":\n return gr.Image()\n```\n\nSome notes on this implementation:\n1. All instances of `GradioTool` have an attribute called `client` that is a pointed to the underlying [gradio client](https://github.com/gradio-app/gradio/tree/main/client/python#gradio_client-use-a-gradio-app-as-an-api----in-3-lines-of-python). That is what you should use\nin the `create_job` method.\n2. `create_job` just passes the query string to the `submit` function of the client with some other parameters hardcoded, i.e. the negative prompt string and the guidance scale. We could modify our tool to also accept these values from the input string in a subsequent version.\n3. The `postprocess` method simply returns the first image from the gallery of images created by the stable diffusion space. We use the `os` module to get the full path of the image.\n\n## Conclusion\n\nYou now know how to extend the abilities of your LLM with the 1000s of gradio spaces running in the wild!\nAgain, we welcome any contributions to the [gradio_tools](https://github.com/freddyaboulton/gradio-tools) library.\nWe're excited to see the tools you all build!\n\n", "html": "

Gradio & LLM Agents \ud83e\udd1d

\n\n

Large Language Models (LLMs) are very impressive but they can be made even more powerful if we could give them skills to accomplish specialized tasks.

\n\n

The gradio_tools library can turn any Gradio application into a tool that an agent can use to complete its task. For example, an LLM could use a Gradio tool to transcribe a voice recording it finds online and then summarize it for you. Or it could use a different Gradio tool to apply OCR to a document on your Google Drive and then answer questions about it.

\n\n

This guide will show how you can use gradio_tools to grant your LLM Agent access to the cutting-edge Gradio applications hosted across the web. Although gradio_tools is compatible with more than one agent framework, we will focus on Langchain Agents in this guide.

\n\n

Some background

\n\n

What are agents?

\n\n

A LangChain agent is a Large Language Model (LLM) that takes user input and reports an output based on using one of many tools at its disposal.

\n\n

What is Gradio?

\n\n

Gradio is the de facto standard framework for building Machine Learning Web Applications and sharing them with the world - all with just Python! \ud83d\udc0d

\n\n

gradio_tools - An end-to-end example

\n\n

To get started with gradio_tools, all you need to do is import and initialize your tools and pass them to the langchain agent!

\n\n

In the following example, we import the StableDiffusionPromptGeneratorTool to create a good prompt for stable diffusion, the\nStableDiffusionTool to create an image with our improved prompt, the ImageCaptioningTool to caption the generated image, and\nthe TextToVideoTool to create a video from a prompt.

\n\n

We then tell our agent to create an image of a dog riding a skateboard, but to please improve our prompt ahead of time. We also ask\nit to caption the generated image and create a video for it. The agent can decide which tool to use without us explicitly telling it.

\n\n
import os\n\nif not os.getenv(\"OPENAI_API_KEY\"):\n    raise ValueError(\"OPENAI_API_KEY must be set\")\n\nfrom langchain.agents import initialize_agent\nfrom langchain.llms import OpenAI\nfrom gradio_tools import (StableDiffusionTool, ImageCaptioningTool, StableDiffusionPromptGeneratorTool,\n                          TextToVideoTool)\n\nfrom langchain.memory import ConversationBufferMemory\n\nllm = OpenAI(temperature=0)\nmemory = ConversationBufferMemory(memory_key=\"chat_history\")\ntools = [StableDiffusionTool().langchain, ImageCaptioningTool().langchain,\n         StableDiffusionPromptGeneratorTool().langchain, TextToVideoTool().langchain]\n\n\nagent = initialize_agent(tools, llm, memory=memory, agent=\"conversational-react-description\", verbose=True)\noutput = agent.run(input=(\"Please create a photo of a dog riding a skateboard \"\n                          \"but improve my prompt prior to using an image generator.\"\n                          \"Please caption the generated image and create a video for it using the improved prompt.\"))\n
\n\n

You'll note that we are using some pre-built tools that come with gradio_tools. Please see this doc for a complete list of the tools that come with gradio_tools.\nIf you would like to use a tool that's not currently in gradio_tools, it is very easy to add your own. That's what the next section will cover.

\n\n

gradio_tools - creating your own tool

\n\n

The core abstraction is the GradioTool, which lets you define a new tool for your LLM as long as you implement a standard interface:

\n\n
class GradioTool(BaseTool):\n\n    def __init__(self, name: str, description: str, src: str) -> None:\n\n    @abstractmethod\n    def create_job(self, query: str) -> Job:\n        pass\n\n    @abstractmethod\n    def postprocess(self, output: Tuple[Any] | Any) -> str:\n        pass\n
\n\n

The requirements are:\n1. The name for your tool\n2. The description for your tool. This is crucial! Agents decide which tool to use based on their description. Be precise and be sure to include an example of what the input and the output of the tool should look like.\n3. The url or space id, e.g. freddyaboulton/calculator, of the Gradio application. Based on this value, gradio_tool will create a gradio client instance to query the upstream application via API. Be sure to click the link and learn more about the gradio client library if you are not familiar with it.\n4. create_job - Given a string, this method should parse that string and return a job from the client. Most times, this is as simple as passing the string to the submit function of the client. More info on creating jobs here\n5. postprocess - Given the result of the job, convert it to a string the LLM can display to the user.\n6. Optional - Some libraries, e.g. MiniChain, may need some info about the underlying gradio input and output types used by the tool. By default, this will return gr.Textbox() but \nif you'd like to provide more accurate info, implement the _block_input(self, gr) and _block_output(self, gr) methods of the tool. The gr variable is the gradio module (the result of import gradio as gr). It will be\nautomatically imported by the GradioTool parent class and passed to the _block_input and _block_output methods.

\n\n

And that's it!

\n\n

Once you have created your tool, open a pull request to the gradio_tools repo! We welcome all contributions.

\n\n

Example tool - Stable Diffusion

\n\n

Here is the code for the StableDiffusion tool as an example:

\n\n
from gradio_tool import GradioTool\nimport os\n\nclass StableDiffusionTool(GradioTool):\n    \"\"\"Tool for calling stable diffusion from llm\"\"\"\n\n    def __init__(\n        self,\n        name=\"StableDiffusion\",\n        description=(\n            \"An image generator. Use this to generate images based on \"\n            \"text input. Input should be a description of what the image should \"\n            \"look like. The output will be a path to an image file.\"\n        ),\n        src=\"gradio-client-demos/stable-diffusion\",\n        hf_token=None,\n    ) -> None:\n        super().__init__(name, description, src, hf_token)\n\n    def create_job(self, query: str) -> Job:\n        return self.client.submit(query, \"\", 9, fn_index=1)\n\n    def postprocess(self, output: str) -> str:\n        return [os.path.join(output, i) for i in os.listdir(output) if not i.endswith(\"json\")][0]\n\n    def _block_input(self, gr) -> \"gr.components.Component\":\n        return gr.Textbox()\n\n    def _block_output(self, gr) -> \"gr.components.Component\":\n        return gr.Image()\n
\n\n

Some notes on this implementation:\n1. All instances of GradioTool have an attribute called client that is a pointer to the underlying gradio client. That is what you should use\nin the create_job method.\n2. create_job just passes the query string to the submit function of the client with some other parameters hardcoded, i.e. the negative prompt string and the guidance scale. We could modify our tool to also accept these values from the input string in a subsequent version.\n3. The postprocess method simply returns the first image from the gallery of images created by the stable diffusion space. We use the os module to get the full path of the image.

\n\n

Conclusion

\n\n

You now know how to extend the abilities of your LLM with the 1000s of gradio spaces running in the wild!\nAgain, we welcome any contributions to the gradio_tools library.\nWe're excited to see the tools you all build!

\n", "tags": [], "spaces": [], "url": "/guides/gradio-and-llm-agents/", "contributor": null}]}, {"category": "Other Tutorials", "guides": [{"name": "building-a-pictionary-app", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 33, "pretty_name": "Building A Pictionary App", "content": "# Building a Pictionary App\n\n\n\n\n## Introduction\n\nHow well can an algorithm guess what you're drawing? A few years ago, Google released the **Quick Draw** dataset, which contains drawings made by humans of a variety of every objects. Researchers have used this dataset to train models to guess Pictionary-style drawings. \n\nSuch models are perfect to use with Gradio's *sketchpad* input, so in this tutorial we will build a Pictionary web application using Gradio. We will be able to build the whole web application in Python, and will look like this (try drawing something!):\n\n\n\nLet's get started! This guide covers how to build a pictionary app (step-by-step): \n\n1. [Set up the Sketch Recognition Model](#1-set-up-the-sketch-recognition-model)\n2. [Define a `predict` function](#2-define-a-predict-function)\n3. [Create a Gradio Interface](#3-create-a-gradio-interface)\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). To use the pretrained sketchpad model, also install `torch`.\n\n## 1. Set up the Sketch Recognition Model\n\nFirst, you will need a sketch recognition model. Since many researchers have already trained their own models on the Quick Draw dataset, we will use a pretrained model in this tutorial. Our model is a light 1.5 MB model trained by Nate Raw, that [you can download here](https://huggingface.co/spaces/nateraw/quickdraw/blob/main/pytorch_model.bin). \n\nIf you are interested, here [is the code](https://github.com/nateraw/quickdraw-pytorch) that was used to train the model. We will simply load the pretrained model in PyTorch, as follows:\n\n```python\nimport torch\nfrom torch import nn\n\nmodel = nn.Sequential(\n nn.Conv2d(1, 32, 3, padding='same'),\n nn.ReLU(),\n nn.MaxPool2d(2),\n nn.Conv2d(32, 64, 3, padding='same'),\n nn.ReLU(),\n nn.MaxPool2d(2),\n nn.Conv2d(64, 128, 3, padding='same'),\n nn.ReLU(),\n nn.MaxPool2d(2),\n nn.Flatten(),\n nn.Linear(1152, 256),\n nn.ReLU(),\n nn.Linear(256, len(LABELS)),\n)\nstate_dict = torch.load('pytorch_model.bin', map_location='cpu')\nmodel.load_state_dict(state_dict, strict=False)\nmodel.eval()\n```\n\n## 2. Define a `predict` function\n\nNext, you will need to define a function that takes in the *user input*, which in this case is a sketched image, and returns the prediction. The prediction should be returned as a dictionary whose keys are class name and values are confidence probabilities. We will load the class names from this [text file](https://huggingface.co/spaces/nateraw/quickdraw/blob/main/class_names.txt).\n\nIn the case of our pretrained model, it will look like this:\n\n```python\nfrom pathlib import Path\n\nLABELS = Path('class_names.txt').read_text().splitlines()\n\ndef predict(img):\n x = torch.tensor(img, dtype=torch.float32).unsqueeze(0).unsqueeze(0) / 255.\n with torch.no_grad():\n out = model(x)\n probabilities = torch.nn.functional.softmax(out[0], dim=0)\n values, indices = torch.topk(probabilities, 5)\n confidences = {LABELS[i]: v.item() for i, v in zip(indices, values)}\n return confidences\n```\n\nLet's break this down. 
The function takes one parameters:\n\n* `img`: the input image as a `numpy` array\n\nThen, the function converts the image to a PyTorch `tensor`, passes it through the model, and returns:\n\n* `confidences`: the top five predictions, as a dictionary whose keys are class labels and whose values are confidence probabilities\n\n## 3. Create a Gradio Interface\n\nNow that we have our predictive function set up, we can create a Gradio Interface around it. \n\nIn this case, the input component is a sketchpad. To create a sketchpad input, we can use the convenient string shortcut, `\"sketchpad\"` which creates a canvas for a user to draw on and handles the preprocessing to convert that to a numpy array. \n\nThe output component will be a `\"label\"`, which displays the top labels in a nice form.\n\nFinally, we'll add one more parameter, setting `live=True`, which allows our interface to run in real time, adjusting its predictions every time a user draws on the sketchpad. The code for Gradio looks like this:\n\n```python\nimport gradio as gr\n\ngr.Interface(fn=predict, \n inputs=\"sketchpad\",\n outputs=\"label\",\n live=True).launch()\n```\n\nThis produces the following interface, which you can try right here in your browser (try drawing something, like a \"snake\" or a \"laptop\"):\n\n\n\n----------\n\nAnd you're done! That's all the code you need to build a Pictionary-style guessing app. Have fun and try to find some edge cases \ud83e\uddd0\n\n", "html": "

Building a Pictionary App

\n\n

Introduction

\n\n

How well can an algorithm guess what you're drawing? A few years ago, Google released the Quick Draw dataset, which contains drawings made by humans of a variety of everyday objects. Researchers have used this dataset to train models to guess Pictionary-style drawings.

\n\n

Such models are perfect to use with Gradio's sketchpad input, so in this tutorial we will build a Pictionary web application using Gradio. We will be able to build the whole web application in Python, and it will look like this (try drawing something!):

\n\n\n\n

Let's get started! This guide covers how to build a pictionary app (step-by-step):

\n\n
    \n
  1. Set up the Sketch Recognition Model
  2. Define a predict function
  3. Create a Gradio Interface
\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed. To use the pretrained sketchpad model, also install torch.

\n\n

1. Set up the Sketch Recognition Model

\n\n

First, you will need a sketch recognition model. Since many researchers have already trained their own models on the Quick Draw dataset, we will use a pretrained model in this tutorial. Our model is a lightweight 1.5 MB model trained by Nate Raw, which you can download here.

\n\n

If you are interested, here is the code that was used to train the model. We will simply load the pretrained model in PyTorch, as follows:

\n\n
import torch\nfrom torch import nn\n\nmodel = nn.Sequential(\n    nn.Conv2d(1, 32, 3, padding='same'),\n    nn.ReLU(),\n    nn.MaxPool2d(2),\n    nn.Conv2d(32, 64, 3, padding='same'),\n    nn.ReLU(),\n    nn.MaxPool2d(2),\n    nn.Conv2d(64, 128, 3, padding='same'),\n    nn.ReLU(),\n    nn.MaxPool2d(2),\n    nn.Flatten(),\n    nn.Linear(1152, 256),\n    nn.ReLU(),\n    nn.Linear(256, len(LABELS)),\n)\nstate_dict = torch.load('pytorch_model.bin',    map_location='cpu')\nmodel.load_state_dict(state_dict, strict=False)\nmodel.eval()\n
\n\n

2. Define a predict function

\n\n

Next, you will need to define a function that takes in the user input, which in this case is a sketched image, and returns the prediction. The prediction should be returned as a dictionary whose keys are class names and whose values are confidence probabilities. We will load the class names from this text file.

\n\n

In the case of our pretrained model, it will look like this:

\n\n
from pathlib import Path\n\nLABELS = Path('class_names.txt').read_text().splitlines()\n\ndef predict(img):\n    x = torch.tensor(img, dtype=torch.float32).unsqueeze(0).unsqueeze(0) / 255.\n    with torch.no_grad():\n        out = model(x)\n    probabilities = torch.nn.functional.softmax(out[0], dim=0)\n    values, indices = torch.topk(probabilities, 5)\n    confidences = {LABELS[i]: v.item() for i, v in zip(indices, values)}\n    return confidences\n
\n\n

Let's break this down. The function takes one parameter:

\n\n
    \n
  • img: the input image as a numpy array
\n\n

Then, the function converts the image to a PyTorch tensor, passes it through the model, and returns:

\n\n
    \n
  • confidences: the top five predictions, as a dictionary whose keys are class labels and whose values are confidence probabilities
\n\n
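To make the return shape concrete, the confidences dictionary might look roughly like the sketch below. The class labels are just examples of Quick Draw categories and the numbers are invented; the important part is that it maps class names to probabilities, which is the format the \"label\" output component knows how to display:

```python
# Illustrative output of predict() for a sketch of a snake (values invented):
{
    "snake": 0.61,
    "worm": 0.17,
    "line": 0.09,
    "string bean": 0.07,
    "rain": 0.06,
}
```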

3. Create a Gradio Interface

\n\n

Now that we have our predictive function set up, we can create a Gradio Interface around it.

\n\n

In this case, the input component is a sketchpad. To create a sketchpad input, we can use the convenient string shortcut, \"sketchpad\" which creates a canvas for a user to draw on and handles the preprocessing to convert that to a numpy array.

\n\n

The output component will be a \"label\", which displays the top labels in a nice form.

\n\n

Finally, we'll add one more parameter, setting live=True, which allows our interface to run in real time, adjusting its predictions every time a user draws on the sketchpad. The code for Gradio looks like this:

\n\n
import gradio as gr\n\ngr.Interface(fn=predict, \n             inputs=\"sketchpad\",\n             outputs=\"label\",\n             live=True).launch()\n
\n\n

This produces the following interface, which you can try right here in your browser (try drawing something, like a \"snake\" or a \"laptop\"):

\n\n\n\n
\n\n

And you're done! That's all the code you need to build a Pictionary-style guessing app. Have fun and try to find some edge cases \ud83e\uddd0

\n", "tags": ["SKETCHPAD", "LABELS", "LIVE"], "spaces": ["https://huggingface.co/spaces/nateraw/quickdraw"], "url": "/guides/building-a-pictionary-app/", "contributor": null}, {"name": "create-your-own-friends-with-a-gan", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 34, "pretty_name": "Create Your Own Friends With A Gan", "content": "# Create Your Own Friends with a GAN\n\n\n\n\n\n\n\n## Introduction\n\nIt seems that cryptocurrencies, [NFTs](https://www.nytimes.com/interactive/2022/03/18/technology/nft-guide.html), and the web3 movement are all the rage these days! Digital assets are being listed on marketplaces for astounding amounts of money, and just about every celebrity is debuting their own NFT collection. While your crypto assets [may be taxable, such as in Canada](https://www.canada.ca/en/revenue-agency/programs/about-canada-revenue-agency-cra/compliance/digital-currency/cryptocurrency-guide.html), today we'll explore some fun and tax-free ways to generate your own assortment of procedurally generated [CryptoPunks](https://www.larvalabs.com/cryptopunks).\n\nGenerative Adversarial Networks, often known just as *GANs*, are a specific class of deep-learning models that are designed to learn from an input dataset to create (*generate!*) new material that is convincingly similar to elements of the original training set. Famously, the website [thispersondoesnotexist.com](https://thispersondoesnotexist.com/) went viral with lifelike, yet synthetic, images of people generated with a model called StyleGAN2. GANs have gained traction in the machine learning world, and are now being used to generate all sorts of images, text, and even [music](https://salu133445.github.io/musegan/)!\n\nToday we'll briefly look at the high-level intuition behind GANs, and then we'll build a small demo around a pre-trained GAN to see what all the fuss is about. Here's a peek at what we're going to be putting together:\n\n\n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). To use the pretrained model, also install `torch` and `torchvision`.\n\n## GANs: a very brief introduction\n\nOriginally proposed in [Goodfellow et al. 2014](https://arxiv.org/abs/1406.2661), GANs are made up of neural networks which compete with the intention of outsmarting each other. One network, known as the *generator*, is responsible for generating images. The other network, the *discriminator*, receives an image at a time from the generator along with a **real** image from the training data set. The discriminator then has to guess: which image is the fake?\n\nThe generator is constantly training to create images which are trickier for the discriminator to identify, while the discriminator raises the bar for the generator every time it correctly detects a fake. As the networks engage in this competitive (*adversarial!*) relationship, the images that get generated improve to the point where they become indistinguishable to human eyes!\n\nFor a more in-depth look at GANs, you can take a look at [this excellent post on Analytics Vidhya](https://www.analyticsvidhya.com/blog/2021/06/a-detailed-explanation-of-gan-with-implementation-using-tensorflow-and-keras/) or this [PyTorch tutorial](https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html). For now, though, we'll dive into a demo!\n\n## Step 1 \u2014 Create the Generator model\n\nTo generate new images with a GAN, you only need the generator model. 
There are many different architectures that the generator could use, but for this demo we'll use a pretrained GAN generator model with the following architecture:\n\n```python\nfrom torch import nn\n\nclass Generator(nn.Module):\n # Refer to the link below for explanations about nc, nz, and ngf\n # https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html#inputs\n def __init__(self, nc=4, nz=100, ngf=64):\n super(Generator, self).__init__()\n self.network = nn.Sequential(\n nn.ConvTranspose2d(nz, ngf * 4, 3, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 0, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n nn.Tanh(),\n )\n\n def forward(self, input):\n output = self.network(input)\n return output\n```\n\nWe're taking the generator from [this repo by @teddykoker](https://github.com/teddykoker/cryptopunks-gan/blob/main/train.py#L90), where you can also see the original discriminator model structure.\n\nAfter instantiating the model, we'll load in the weights from the Hugging Face Hub, stored at [nateraw/cryptopunks-gan](https://huggingface.co/nateraw/cryptopunks-gan):\n\n```python\nfrom huggingface_hub import hf_hub_download\nimport torch\n\nmodel = Generator()\nweights_path = hf_hub_download('nateraw/cryptopunks-gan', 'generator.pth')\nmodel.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))) # Use 'cuda' if you have a GPU available\n```\n\n## Step 2 \u2014 Defining a `predict` function\n\nThe `predict` function is the key to making Gradio work! Whatever inputs we choose through the Gradio interface will get passed through our `predict` function, which should operate on the inputs and generate outputs that we can display with Gradio output components. For GANs it's common to pass random noise into our model as the input, so we'll generate a tensor of random numbers and pass that through the model. We can then use `torchvision`'s `save_image` function to save the output of the model as a `png` file, and return the file name:\n\n```python\nfrom torchvision.utils import save_image\n\ndef predict(seed):\n num_punks = 4\n torch.manual_seed(seed)\n z = torch.randn(num_punks, 100, 1, 1)\n punks = model(z)\n save_image(punks, \"punks.png\", normalize=True)\n return 'punks.png'\n```\n\nWe're giving our `predict` function a `seed` parameter, so that we can fix the random tensor generation with a seed. We'll then be able to reproduce punks if we want to see them again by passing in the same seed.\n\n*Note!* Our model needs an input tensor of dimensions 100x1x1 to do a single inference, or (BatchSize)x100x1x1 for generating a batch of images. In this demo we'll start by generating 4 punks at a time.\n\n## Step 3 \u2014 Creating a Gradio interface\n\nAt this point you can even run the code you have with `predict()`, and you'll find your freshly generated punks in your file system at `./punks.png`. To make a truly interactive demo, though, we'll build out a simple interface with Gradio. 
Our goals here are to:\n\n* Set a slider input so users can choose the \"seed\" value\n* Use an image component for our output to showcase the generated punks\n* Use our `predict()` to take the seed and generate the images\n\nWith `gr.Interface()`, we can define all of that with a single function call:\n\n```python\nimport gradio as gr\n\ngr.Interface(\n predict,\n inputs=[\n gr.Slider(0, 1000, label='Seed', default=42),\n ],\n outputs=\"image\",\n).launch()\n```\n\nLaunching the interface should present you with something like this:\n\n\n\n## Step 4 \u2014 Even more punks!\n\nGenerating 4 punks at a time is a good start, but maybe we'd like to control how many we want to make each time. Adding more inputs to our Gradio interface is as simple as adding another item to the `inputs` list that we pass to `gr.Interface`:\n\n```python\ngr.Interface(\n predict,\n inputs=[\n gr.Slider(0, 1000, label='Seed', default=42),\n gr.Slider(4, 64, label='Number of Punks', step=1, default=10), # Adding another slider!\n ],\n outputs=\"image\",\n).launch()\n```\n\nThe new input will be passed to our `predict()` function, so we have to make some changes to that function to accept a new parameter:\n\n```python\ndef predict(seed, num_punks):\n torch.manual_seed(seed)\n z = torch.randn(num_punks, 100, 1, 1)\n punks = model(z)\n save_image(punks, \"punks.png\", normalize=True)\n return 'punks.png'\n```\n\nWhen you relaunch your interface, you should see a second slider that'll let you control the number of punks!\n\n## Step 5 - Polishing it up\n\nYour Gradio app is pretty much good to go, but you can add a few extra things to really make it ready for the spotlight \u2728\n\nWe can add some examples that users can easily try out by adding this to the `gr.Interface`:\n\n```python\ngr.Interface(\n # ...\n # keep everything as it is, and then add\n examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],\n).launch(cache_examples=True) # cache_examples is optional\n```\n\nThe `examples` parameter takes a list of lists, where each item in the sublists is ordered in the same order that we've listed the `inputs`. So in our case, `[seed, num_punks]`. Give it a try!\n\nYou can also try adding a `title`, `description`, and `article` to the `gr.Interface`. 
Each of those parameters accepts a string, so try it out and see what happens \ud83d\udc40 `article` will also accept HTML, as [explored in a previous guide](/guides/key-features/#descriptive-content)!\n\nWhen you're all done, you may end up with something like this:\n\n\n\nFor reference, here is our full code:\n\n```python\nimport torch\nfrom torch import nn\nfrom huggingface_hub import hf_hub_download\nfrom torchvision.utils import save_image\nimport gradio as gr\n\nclass Generator(nn.Module):\n # Refer to the link below for explanations about nc, nz, and ngf\n # https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html#inputs\n def __init__(self, nc=4, nz=100, ngf=64):\n super(Generator, self).__init__()\n self.network = nn.Sequential(\n nn.ConvTranspose2d(nz, ngf * 4, 3, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 0, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n nn.Tanh(),\n )\n\n def forward(self, input):\n output = self.network(input)\n return output\n\nmodel = Generator()\nweights_path = hf_hub_download('nateraw/cryptopunks-gan', 'generator.pth')\nmodel.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))) # Use 'cuda' if you have a GPU available\n\ndef predict(seed, num_punks):\n torch.manual_seed(seed)\n z = torch.randn(num_punks, 100, 1, 1)\n punks = model(z)\n save_image(punks, \"punks.png\", normalize=True)\n return 'punks.png'\n\ngr.Interface(\n predict,\n inputs=[\n gr.Slider(0, 1000, label='Seed', default=42),\n gr.Slider(4, 64, label='Number of Punks', step=1, default=10),\n ],\n outputs=\"image\",\n examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],\n).launch(cache_examples=True)\n```\n----------\n\nCongratulations! You've built out your very own GAN-powered CryptoPunks generator, with a fancy Gradio interface that makes it easy for anyone to use. Now you can [scour the Hub for more GANs](https://huggingface.co/models?other=gan) (or train your own) and continue making even more awesome demos \ud83e\udd17", "html": "

Create Your Own Friends with a GAN

\n\n

Introduction

\n\n

It seems that cryptocurrencies, NFTs, and the web3 movement are all the rage these days! Digital assets are being listed on marketplaces for astounding amounts of money, and just about every celebrity is debuting their own NFT collection. While your crypto assets may be taxable, such as in Canada, today we'll explore some fun and tax-free ways to generate your own assortment of procedurally generated CryptoPunks.

\n\n

Generative Adversarial Networks, often known just as GANs, are a specific class of deep-learning models that are designed to learn from an input dataset to create (generate!) new material that is convincingly similar to elements of the original training set. Famously, the website thispersondoesnotexist.com went viral with lifelike, yet synthetic, images of people generated with a model called StyleGAN2. GANs have gained traction in the machine learning world, and are now being used to generate all sorts of images, text, and even music!

\n\n

Today we'll briefly look at the high-level intuition behind GANs, and then we'll build a small demo around a pre-trained GAN to see what all the fuss is about. Here's a peek at what we're going to be putting together:

\n\n\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed. To use the pretrained model, also install torch and torchvision.

\n\n

GANs: a very brief introduction

\n\n

Originally proposed in Goodfellow et al. 2014, GANs are made up of neural networks which compete with the intention of outsmarting each other. One network, known as the generator, is responsible for generating images. The other network, the discriminator, receives an image at a time from the generator along with a real image from the training data set. The discriminator then has to guess: which image is the fake?

\n\n

The generator is constantly training to create images which are trickier for the discriminator to identify, while the discriminator raises the bar for the generator every time it correctly detects a fake. As the networks engage in this competitive (adversarial!) relationship, the images that get generated improve to the point where they become indistinguishable to human eyes!

\n\n

For a more in-depth look at GANs, you can take a look at this excellent post on Analytics Vidhya or this PyTorch tutorial. For now, though, we'll dive into a demo!

\n\n

Step 1 \u2014 Create the Generator model

\n\n

To generate new images with a GAN, you only need the generator model. There are many different architectures that the generator could use, but for this demo we'll use a pretrained GAN generator model with the following architecture:

\n\n
from torch import nn\n\nclass Generator(nn.Module):\n    # Refer to the link below for explanations about nc, nz, and ngf\n    # https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html#inputs\n    def __init__(self, nc=4, nz=100, ngf=64):\n        super(Generator, self).__init__()\n        self.network = nn.Sequential(\n            nn.ConvTranspose2d(nz, ngf * 4, 3, 1, 0, bias=False),\n            nn.BatchNorm2d(ngf * 4),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False),\n            nn.BatchNorm2d(ngf * 2),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 0, bias=False),\n            nn.BatchNorm2d(ngf),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n            nn.Tanh(),\n        )\n\n    def forward(self, input):\n        output = self.network(input)\n        return output\n
\n\n

We're taking the generator from this repo by @teddykoker, where you can also see the original discriminator model structure.

\n\n

After instantiating the model, we'll load in the weights from the Hugging Face Hub, stored at nateraw/cryptopunks-gan:

\n\n
from huggingface_hub import hf_hub_download\nimport torch\n\nmodel = Generator()\nweights_path = hf_hub_download('nateraw/cryptopunks-gan', 'generator.pth')\nmodel.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))) # Use 'cuda' if you have a GPU available\n
\n\n

Step 2 \u2014 Defining a predict function

\n\n

The predict function is the key to making Gradio work! Whatever inputs we choose through the Gradio interface will get passed through our predict function, which should operate on the inputs and generate outputs that we can display with Gradio output components. For GANs it's common to pass random noise into our model as the input, so we'll generate a tensor of random numbers and pass that through the model. We can then use torchvision's save_image function to save the output of the model as a png file, and return the file name:

\n\n
from torchvision.utils import save_image\n\ndef predict(seed):\n    num_punks = 4\n    torch.manual_seed(seed)\n    z = torch.randn(num_punks, 100, 1, 1)\n    punks = model(z)\n    save_image(punks, \"punks.png\", normalize=True)\n    return 'punks.png'\n
\n\n

We're giving our predict function a seed parameter, so that we can fix the random tensor generation with a seed. We'll then be able to reproduce punks if we want to see them again by passing in the same seed.

\n\n

Note! Our model needs an input tensor of dimensions 100x1x1 to do a single inference, or (BatchSize)x100x1x1 for generating a batch of images. In this demo we'll start by generating 4 punks at a time.

\n\n

Step 3 \u2014 Creating a Gradio interface

\n\n

At this point you can even run the code you have with predict(<SOME_NUMBER>), and you'll find your freshly generated punks in your file system at ./punks.png. To make a truly interactive demo, though, we'll build out a simple interface with Gradio. Our goals here are to:

\n\n
    \n
  • Set a slider input so users can choose the \"seed\" value
  • Use an image component for our output to showcase the generated punks
  • Use our predict() to take the seed and generate the images
\n\n

With gr.Interface(), we can define all of that with a single function call:

\n\n
import gradio as gr\n\ngr.Interface(\n    predict,\n    inputs=[\n        gr.Slider(0, 1000, label='Seed', default=42),\n    ],\n    outputs=\"image\",\n).launch()\n
\n\n

Launching the interface should present you with something like this:

\n\n\n\n

Step 4 \u2014 Even more punks!

\n\n

Generating 4 punks at a time is a good start, but maybe we'd like to control how many we want to make each time. Adding more inputs to our Gradio interface is as simple as adding another item to the inputs list that we pass to gr.Interface:

\n\n
gr.Interface(\n    predict,\n    inputs=[\n        gr.Slider(0, 1000, label='Seed', default=42),\n        gr.Slider(4, 64, label='Number of Punks', step=1, default=10), # Adding another slider!\n    ],\n    outputs=\"image\",\n).launch()\n
\n\n

The new input will be passed to our predict() function, so we have to make some changes to that function to accept a new parameter:

\n\n
def predict(seed, num_punks):\n    torch.manual_seed(seed)\n    z = torch.randn(num_punks, 100, 1, 1)\n    punks = model(z)\n    save_image(punks, \"punks.png\", normalize=True)\n    return 'punks.png'\n
\n\n

When you relaunch your interface, you should see a second slider that'll let you control the number of punks!

\n\n

Step 5 - Polishing it up

\n\n

Your Gradio app is pretty much good to go, but you can add a few extra things to really make it ready for the spotlight \u2728

\n\n

We can add some examples that users can easily try out by adding this to the gr.Interface:

\n\n
gr.Interface(\n    # ...\n    # keep everything as it is, and then add\n    examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],\n).launch(cache_examples=True) # cache_examples is optional\n
\n\n

The examples parameter takes a list of lists, where each item in the sublists is ordered in the same order that we've listed the inputs. So in our case, [seed, num_punks]. Give it a try!

\n\n

You can also try adding a title, description, and article to the gr.Interface. Each of those parameters accepts a string, so try it out and see what happens \ud83d\udc40 article will also accept HTML, as explored in a previous guide!

\n\n
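For instance, here is a hedged sketch of what those three parameters could look like on our interface; the title, description, and article text below are our own illustrative wording, not part of the original demo:

```python
gr.Interface(
    predict,
    inputs=[
        gr.Slider(0, 1000, label='Seed', default=42),
        gr.Slider(4, 64, label='Number of Punks', step=1, default=10),
    ],
    outputs="image",
    examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],
    title="CryptoPunks GAN",  # shown above the interface
    description="Generate your own assortment of procedurally generated punks.",
    # `article` is rendered below the interface and also accepts HTML.
    article="<p>Generator weights come from <a href='https://huggingface.co/nateraw/cryptopunks-gan'>nateraw/cryptopunks-gan</a> on the Hugging Face Hub.</p>",
).launch(cache_examples=True)
```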

When you're all done, you may end up with something like this:

\n\n\n\n

For reference, here is our full code:

\n\n
import torch\nfrom torch import nn\nfrom huggingface_hub import hf_hub_download\nfrom torchvision.utils import save_image\nimport gradio as gr\n\nclass Generator(nn.Module):\n    # Refer to the link below for explanations about nc, nz, and ngf\n    # https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html#inputs\n    def __init__(self, nc=4, nz=100, ngf=64):\n        super(Generator, self).__init__()\n        self.network = nn.Sequential(\n            nn.ConvTranspose2d(nz, ngf * 4, 3, 1, 0, bias=False),\n            nn.BatchNorm2d(ngf * 4),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False),\n            nn.BatchNorm2d(ngf * 2),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 0, bias=False),\n            nn.BatchNorm2d(ngf),\n            nn.ReLU(True),\n            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n            nn.Tanh(),\n        )\n\n    def forward(self, input):\n        output = self.network(input)\n        return output\n\nmodel = Generator()\nweights_path = hf_hub_download('nateraw/cryptopunks-gan', 'generator.pth')\nmodel.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu'))) # Use 'cuda' if you have a GPU available\n\ndef predict(seed, num_punks):\n    torch.manual_seed(seed)\n    z = torch.randn(num_punks, 100, 1, 1)\n    punks = model(z)\n    save_image(punks, \"punks.png\", normalize=True)\n    return 'punks.png'\n\ngr.Interface(\n    predict,\n    inputs=[\n        gr.Slider(0, 1000, label='Seed', default=42),\n        gr.Slider(4, 64, label='Number of Punks', step=1, default=10),\n    ],\n    outputs=\"image\",\n    examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],\n).launch(cache_examples=True)\n
\n\n
\n\n

Congratulations! You've built out your very own GAN-powered CryptoPunks generator, with a fancy Gradio interface that makes it easy for anyone to use. Now you can scour the Hub for more GANs (or train your own) and continue making even more awesome demos \ud83e\udd17

\n", "tags": ["GAN", "IMAGE", "HUB"], "spaces": ["https://huggingface.co/spaces/NimaBoscarino/cryptopunks", "https://huggingface.co/spaces/nateraw/cryptopunks-generator"], "url": "/guides/create-your-own-friends-with-a-gan/", "contributor": "Nima Boscarino and Nate Raw"}, {"name": "creating-a-new-component", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 35, "pretty_name": "Creating A New Component", "content": "# How to Create a New Component\n\n## Introduction\n\nThe purpose of this guide is to illustrate how to add a new component, which you can use in your Gradio applications. The guide will be complemented by code snippets showing step by step how the [ColorPicker](https://gradio.app/docs/#colorpicker) component was added.\n\n## Prerequisites\n\nMake sure you have followed the [CONTRIBUTING.md](https://github.com/gradio-app/gradio/blob/main/CONTRIBUTING.md) guide in order to setup your local development environment (both client and server side).\n\nHere's how to create a new component on Gradio:\n\n1. [Create a New Python Class and Import it](#1-create-a-new-python-class-and-import-it)\n2. [Create a New Svelte Component](#2-create-a-new-svelte-component)\n3. [Create a New Demo](#3-create-a-new-demo)\n\n## 1. Create a New Python Class and Import it\n\nThe first thing to do is to create a new class within the [components.py](https://github.com/gradio-app/gradio/blob/main/gradio/components.py) file. This Python class should inherit from a list of base components and should be placed within the file in the correct section with respect to the type of component you want to add (e.g. input, output or static components).\nIn general, it is advisable to take an existing component as a reference (e.g. [TextBox](https://github.com/gradio-app/gradio/blob/main/gradio/components.py#L290)), copy its code as a skeleton and then adapt it to the case at hand.\n\nLet's take a look at the class added to the [components.py](https://github.com/gradio-app/gradio/blob/main/gradio/components.py) file for the ColorPicker component:\n\n```python\n@document()\nclass ColorPicker(Changeable, Submittable, IOComponent):\n \"\"\"\n Creates a color picker for user to select a color as string input.\n Preprocessing: passes selected color value as a {str} into the function.\n Postprocessing: expects a {str} returned from function and sets color picker value to it.\n Examples-format: a {str} with a hexadecimal representation of a color, e.g. \"#ff0000\" for red.\n Demos: color_picker, color_generator\n \"\"\"\n\n def __init__(\n self,\n value: str = None,\n *,\n label: Optional[str] = None,\n show_label: bool = True,\n interactive: Optional[bool] = None,\n visible: bool = True,\n elem_id: Optional[str] = None,\n **kwargs,\n ):\n \"\"\"\n Parameters:\n value: default text to provide in color picker.\n label: component name in interface.\n show_label: if True, will display label.\n interactive: if True, will be rendered as an editable color picker; if False, editing will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.\n visible: If False, component will be hidden.\n elem_id: An optional string that is assigned as the id of this component in the HTML DOM. 
Can be used for targeting CSS styles.\n \"\"\"\n self.value = self.postprocess(value)\n self.cleared_value = \"#000000\"\n self.test_input = value\n IOComponent.__init__(\n self,\n label=label,\n show_label=show_label,\n interactive=interactive,\n visible=visible,\n elem_id=elem_id,\n **kwargs,\n )\n\n def get_config(self):\n return {\n \"value\": self.value,\n **IOComponent.get_config(self),\n }\n\n @staticmethod\n def update(\n value: Optional[Any] = None,\n label: Optional[str] = None,\n show_label: Optional[bool] = None,\n visible: Optional[bool] = None,\n interactive: Optional[bool] = None,\n ):\n return {\n \"value\": value,\n \"label\": label,\n \"show_label\": show_label,\n \"visible\": visible,\n \"interactive\": interactive,\n \"__type__\": \"update\",\n }\n\n # Input Functionalities\n def preprocess(self, x: str | None) -> Any:\n \"\"\"\n Any preprocessing needed to be performed on function input.\n Parameters:\n x (str): text\n Returns:\n (str): text\n \"\"\"\n if x is None:\n return None\n else:\n return str(x)\n\n def preprocess_example(self, x: str | None) -> Any:\n \"\"\"\n Any preprocessing needed to be performed on an example before being passed to the main function.\n \"\"\"\n if x is None:\n return None\n else:\n return str(x)\n\n # Output Functionalities\n def postprocess(self, y: str | None):\n \"\"\"\n Any postprocessing needed to be performed on function output.\n Parameters:\n y (str | None): text\n Returns:\n (str | None): text\n \"\"\"\n if y is None:\n return None\n else:\n return str(y)\n\n def deserialize(self, x):\n \"\"\"\n Convert from serialized output (e.g. base64 representation) from a call() to the interface to a human-readable version of the output (path of an image, etc.)\n \"\"\"\n return x\n```\n\nOnce defined, it is necessary to import the new class inside the [\\_\\_init\\_\\_](https://github.com/gradio-app/gradio/blob/main/gradio/__init__.py) module class in order to make it module visible.\n\n```python\n\nfrom gradio.components import (\n ...\n ColorPicker,\n ...\n)\n\n```\n\n### 1.1 Writing Unit Test for Python Class\n\nWhen developing new components, you should also write a suite of unit tests for it. The tests should be placed in the [gradio/test/test_components.py](https://github.com/gradio-app/gradio/blob/main/test/test_components.py) file. Again, as above, take a cue from the tests of other components (e.g. [Textbox](https://github.com/gradio-app/gradio/blob/main/test/test_components.py)) and add as many unit tests as you think are appropriate to test all the different aspects and functionalities of the new component. 
For example, the following tests were added for the ColorPicker component:\n\n```python\nclass TestColorPicker(unittest.TestCase):\n def test_component_functions(self):\n \"\"\"\n Preprocess, postprocess, serialize, save_flagged, restore_flagged, tokenize, get_config\n \"\"\"\n color_picker_input = gr.ColorPicker()\n self.assertEqual(color_picker_input.preprocess(\"#000000\"), \"#000000\")\n self.assertEqual(color_picker_input.preprocess_example(\"#000000\"), \"#000000\")\n self.assertEqual(color_picker_input.postprocess(None), None)\n self.assertEqual(color_picker_input.postprocess(\"#FFFFFF\"), \"#FFFFFF\")\n self.assertEqual(color_picker_input.serialize(\"#000000\", True), \"#000000\")\n\n color_picker_input.interpretation_replacement = \"unknown\"\n\n self.assertEqual(\n color_picker_input.get_config(),\n {\n \"value\": None,\n \"show_label\": True,\n \"label\": None,\n \"style\": {},\n \"elem_id\": None,\n \"visible\": True,\n \"interactive\": None,\n \"name\": \"colorpicker\",\n },\n )\n\n def test_in_interface_as_input(self):\n \"\"\"\n Interface, process, interpret,\n \"\"\"\n iface = gr.Interface(lambda x: x, \"colorpicker\", \"colorpicker\")\n self.assertEqual(iface.process([\"#000000\"]), [\"#000000\"])\n\n def test_in_interface_as_output(self):\n \"\"\"\n Interface, process\n\n \"\"\"\n iface = gr.Interface(lambda x: x, \"colorpicker\", gr.ColorPicker())\n self.assertEqual(iface.process([\"#000000\"]), [\"#000000\"])\n\n def test_static(self):\n \"\"\"\n postprocess\n \"\"\"\n component = gr.ColorPicker(\"#000000\")\n self.assertEqual(component.get_config().get(\"value\"), \"#000000\")\n```\n\n## 2. Create a New Svelte Component\n\nLet's see the steps you need to follow to create the frontend of your new component and to map it to its python code:\n\n- Create a new UI-side Svelte component and figure out where to place it. The options are: create a package for the new component in the [js folder](https://github.com/gradio-app/gradio/tree/main/js/), if this is completely different from existing components or add the new component to an existing package, such as to the [form package](https://github.com/gradio-app/gradio/tree/main/js/form). The ColorPicker component for example, was included in the form package because it is similar to components that already exist.\n- Create a file with an appropriate name in the src folder of the package where you placed the Svelte component, note: the name must start with a capital letter. This is the 'core' component and it's the generic component that has no knowledge of Gradio specific functionality. Initially add any text/html to this file so that the component renders something. The Svelte application code for the ColorPicker looks like this:\n\n```typescript\n\n\n\n\n```\n\n- Export this file inside the index.ts file of the package where you placed the Svelte component by doing `export { default as FileName } from \"./FileName.svelte\"`. The ColorPicker file is exported in the [index.ts](https://github.com/gradio-app/gradio/blob/main/js/form/src/index.ts) file and the export is performed by doing: `export { default as ColorPicker } from \"./ColorPicker.svelte\";`.\n- Create the Gradio specific component in [js/app/src/components](https://github.com/gradio-app/gradio/tree/main/js/app/src/components). This is a Gradio wrapper that handles the specific logic of the library, passes the necessary data down to the core component and attaches any necessary event listeners. 
Copy the folder of another component, rename it and edit the code inside it, keeping the structure.\n\nHere you will have three files, the first file is for the Svelte application, and it will look like this:\n\n```typescript\n\n\n\n\n\n\t\n\n\t\n\n```\n\nThe second one contains the tests for the frontend, for example for the ColorPicker component:\n\n```typescript\nimport { test, describe, assert, afterEach } from \"vitest\";\nimport { cleanup, render } from \"@gradio/tootils\";\n\nimport ColorPicker from \"./ColorPicker.svelte\";\nimport type { LoadingStatus } from \"@gradio/statustracker/types\";\n\nconst loading_status = {\n\teta: 0,\n\tqueue_position: 1,\n\tstatus: \"complete\" as LoadingStatus[\"status\"],\n\tscroll_to_output: false,\n\tvisible: true,\n\tfn_index: 0\n};\n\ndescribe(\"ColorPicker\", () => {\n\tafterEach(() => cleanup());\n\n\ttest(\"renders provided value\", () => {\n\t\tconst { getByDisplayValue } = render(ColorPicker, {\n\t\t\tloading_status,\n\t\t\tshow_label: true,\n\t\t\tmode: \"dynamic\",\n\t\t\tvalue: \"#000000\",\n\t\t\tlabel: \"ColorPicker\"\n\t\t});\n\n\t\tconst item: HTMLInputElement = getByDisplayValue(\"#000000\");\n\t\tassert.equal(item.value, \"#000000\");\n\t});\n\n\ttest(\"changing the color should update the value\", async () => {\n\t\tconst { component, getByDisplayValue } = render(ColorPicker, {\n\t\t\tloading_status,\n\t\t\tshow_label: true,\n\t\t\tmode: \"dynamic\",\n\t\t\tvalue: \"#000000\",\n\t\t\tlabel: \"ColorPicker\"\n\t\t});\n\n\t\tconst item: HTMLInputElement = getByDisplayValue(\"#000000\");\n\n\t\tassert.equal(item.value, \"#000000\");\n\n\t\tawait component.$set({\n\t\t\tvalue: \"#FFFFFF\"\n\t\t});\n\n\t\tassert.equal(component.value, \"#FFFFFF\");\n\t});\n});\n```\n\nThe third one is the index.ts file:\n\n```typescript\nexport { default as Component } from \"./ColorPicker.svelte\";\nexport const modes = [\"static\", \"dynamic\"];\n```\n\n- Add the mapping for your component in the [directory.ts file](https://github.com/gradio-app/gradio/blob/main/js/app/src/components/directory.ts). To do this, copy and paste the mapping line of any component and edit its text. The key name must be the lowercase version of the actual component name in the Python library. So for example, for the ColorPicker component the mapping looks like this:\n\n```typescript\nexport const component_map = {\n...\ncolorpicker: () => import(\"./ColorPicker\"),\n...\n}\n```\n\n### 2.1 Writing Unit Test for Svelte Component\n\nWhen developing new components, you should also write a suite of unit tests for it. The tests should be placed in the new component's folder in a file named MyAwesomeComponent.test.ts. Again, as above, take a cue from the tests of other components (e.g. [Textbox.test.ts](https://github.com/gradio-app/gradio/blob/main/js/app/src/components/Textbox/Textbox.test.ts)) and add as many unit tests as you think are appropriate to test all the different aspects and functionalities of the new component.\n\n### 3. Create a New Demo\n\nThe last step is to create a demo in the [gradio/demo folder](https://github.com/gradio-app/gradio/tree/main/demo), which will use the newly added component. Again, the suggestion is to reference an existing demo. Write the code for the demo in a file called run.py, add the necessary requirements and an image showing the application interface. 
Finally add a gif showing its usage.\nYou can take a look at the [demo](https://github.com/gradio-app/gradio/tree/main/demo/color_picker) created for the ColorPicker, where an icon and a color selected through the new component is taken as input, and the same icon colored with the selected color is returned as output.\n\nTo test the application:\n\n- run on a terminal `python path/demo/run.py` which starts the backend at the address [http://localhost:7860](http://localhost:7860);\n- in another terminal, run `pnpm dev` to start the frontend at [http://localhost:9876](http://localhost:9876) with hot reload functionalities.\n\n## Conclusion\n\nIn this guide, we have shown how simple it is to add a new component to Gradio, seeing step by step how the ColorPicker component was added. For further details, you can refer to PR: [#1695](https://github.com/gradio-app/gradio/pull/1695).\n", "html": "

How to Create a New Component

\n\n

Introduction

\n\n

The purpose of this guide is to illustrate how to add a new component, which you can use in your Gradio applications. The guide will be complemented by code snippets showing step by step how the ColorPicker component was added.

\n\n

Prerequisites

\n\n

Make sure you have followed the CONTRIBUTING.md guide in order to set up your local development environment (both client and server side).

\n\n

Here's how to create a new component on Gradio:

\n\n
    \n
  1. Create a New Python Class and Import it
  2. \n
  3. Create a New Svelte Component
  4. \n
  5. Create a New Demo
  6. \n
\n\n

1. Create a New Python Class and Import it

\n\n

The first thing to do is to create a new class within the components.py file. This Python class should inherit from a list of base components and should be placed within the file in the correct section with respect to the type of component you want to add (e.g. input, output or static components).\nIn general, it is advisable to take an existing component as a reference (e.g. TextBox), copy its code as a skeleton and then adapt it to the case at hand.

\n\n

Let's take a look at the class added to the components.py file for the ColorPicker component:

\n\n
@document()\nclass ColorPicker(Changeable, Submittable, IOComponent):\n    \"\"\"\n    Creates a color picker for user to select a color as string input.\n    Preprocessing: passes selected color value as a {str} into the function.\n    Postprocessing: expects a {str} returned from function and sets color picker value to it.\n    Examples-format: a {str} with a hexadecimal representation of a color, e.g. \"#ff0000\" for red.\n    Demos: color_picker, color_generator\n    \"\"\"\n\n    def __init__(\n        self,\n        value: str = None,\n        *,\n        label: Optional[str] = None,\n        show_label: bool = True,\n        interactive: Optional[bool] = None,\n        visible: bool = True,\n        elem_id: Optional[str] = None,\n        **kwargs,\n    ):\n        \"\"\"\n        Parameters:\n            value: default text to provide in color picker.\n            label: component name in interface.\n            show_label: if True, will display label.\n            interactive: if True, will be rendered as an editable color picker; if False, editing will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.\n            visible: If False, component will be hidden.\n            elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.\n        \"\"\"\n        self.value = self.postprocess(value)\n        self.cleared_value = \"#000000\"\n        self.test_input = value\n        IOComponent.__init__(\n            self,\n            label=label,\n            show_label=show_label,\n            interactive=interactive,\n            visible=visible,\n            elem_id=elem_id,\n            **kwargs,\n        )\n\n    def get_config(self):\n        return {\n            \"value\": self.value,\n            **IOComponent.get_config(self),\n        }\n\n    @staticmethod\n    def update(\n        value: Optional[Any] = None,\n        label: Optional[str] = None,\n        show_label: Optional[bool] = None,\n        visible: Optional[bool] = None,\n        interactive: Optional[bool] = None,\n    ):\n        return {\n            \"value\": value,\n            \"label\": label,\n            \"show_label\": show_label,\n            \"visible\": visible,\n            \"interactive\": interactive,\n            \"__type__\": \"update\",\n        }\n\n    # Input Functionalities\n    def preprocess(self, x: str | None) -> Any:\n        \"\"\"\n        Any preprocessing needed to be performed on function input.\n        Parameters:\n        x (str): text\n        Returns:\n        (str): text\n        \"\"\"\n        if x is None:\n            return None\n        else:\n            return str(x)\n\n    def preprocess_example(self, x: str | None) -> Any:\n        \"\"\"\n        Any preprocessing needed to be performed on an example before being passed to the main function.\n        \"\"\"\n        if x is None:\n            return None\n        else:\n            return str(x)\n\n    # Output Functionalities\n    def postprocess(self, y: str | None):\n        \"\"\"\n        Any postprocessing needed to be performed on function output.\n        Parameters:\n        y (str | None): text\n        Returns:\n        (str | None): text\n        \"\"\"\n        if y is None:\n            return None\n        else:\n            return str(y)\n\n    def deserialize(self, x):\n        \"\"\"\n        Convert from serialized output (e.g. 
base64 representation) from a call() to the interface to a human-readable version of the output (path of an image, etc.)\n        \"\"\"\n        return x\n
\n\n

Once defined, it is necessary to import the new class inside the __init__ module in order to make it visible at the module level.

\n\n
\nfrom gradio.components import (\n    ...\n    ColorPicker,\n    ...\n)\n\n
\n\n

1.1 Writing Unit Test for Python Class

\n\n

When developing new components, you should also write a suite of unit tests for it. The tests should be placed in the gradio/test/test_components.py file. Again, as above, take a cue from the tests of other components (e.g. Textbox) and add as many unit tests as you think are appropriate to test all the different aspects and functionalities of the new component. For example, the following tests were added for the ColorPicker component:

\n\n
class TestColorPicker(unittest.TestCase):\n    def test_component_functions(self):\n        \"\"\"\n        Preprocess, postprocess, serialize, save_flagged, restore_flagged, tokenize, get_config\n        \"\"\"\n        color_picker_input = gr.ColorPicker()\n        self.assertEqual(color_picker_input.preprocess(\"#000000\"), \"#000000\")\n        self.assertEqual(color_picker_input.preprocess_example(\"#000000\"), \"#000000\")\n        self.assertEqual(color_picker_input.postprocess(None), None)\n        self.assertEqual(color_picker_input.postprocess(\"#FFFFFF\"), \"#FFFFFF\")\n        self.assertEqual(color_picker_input.serialize(\"#000000\", True), \"#000000\")\n\n        color_picker_input.interpretation_replacement = \"unknown\"\n\n        self.assertEqual(\n            color_picker_input.get_config(),\n            {\n                \"value\": None,\n                \"show_label\": True,\n                \"label\": None,\n                \"style\": {},\n                \"elem_id\": None,\n                \"visible\": True,\n                \"interactive\": None,\n                \"name\": \"colorpicker\",\n            },\n        )\n\n    def test_in_interface_as_input(self):\n        \"\"\"\n        Interface, process, interpret,\n        \"\"\"\n        iface = gr.Interface(lambda x: x, \"colorpicker\", \"colorpicker\")\n        self.assertEqual(iface.process([\"#000000\"]), [\"#000000\"])\n\n    def test_in_interface_as_output(self):\n        \"\"\"\n        Interface, process\n\n        \"\"\"\n        iface = gr.Interface(lambda x: x, \"colorpicker\", gr.ColorPicker())\n        self.assertEqual(iface.process([\"#000000\"]), [\"#000000\"])\n\n    def test_static(self):\n        \"\"\"\n        postprocess\n        \"\"\"\n        component = gr.ColorPicker(\"#000000\")\n        self.assertEqual(component.get_config().get(\"value\"), \"#000000\")\n
\n\n

2. Create a New Svelte Component

\n\n

Let's see the steps you need to follow to create the frontend of your new component and to map it to its python code:

\n\n
    \n
  • Create a new UI-side Svelte component and figure out where to place it. The options are: create a package for the new component in the js folder if it is completely different from existing components, or add the new component to an existing package, such as the form package. The ColorPicker component, for example, was included in the form package because it is similar to components that already exist.
  • \n
  • Create a file with an appropriate name in the src folder of the package where you placed the Svelte component; note that the name must start with a capital letter. This is the 'core' component: the generic component that has no knowledge of Gradio-specific functionality. Initially, add any text/html to this file so that the component renders something. The Svelte application code for the ColorPicker looks like this:
  • \n
\n\n
\n\n\n\n
\n\n
    \n
  • Export this file inside the index.ts file of the package where you placed the Svelte component by doing export { default as FileName } from \"./FileName.svelte\". The ColorPicker file is exported in the index.ts file and the export is performed by doing: export { default as ColorPicker } from \"./ColorPicker.svelte\";.
  • \n
  • Create the Gradio specific component in js/app/src/components. This is a Gradio wrapper that handles the specific logic of the library, passes the necessary data down to the core component and attaches any necessary event listeners. Copy the folder of another component, rename it and edit the code inside it, keeping the structure.
  • \n
\n\n

Here you will have three files. The first file is for the Svelte application, and it will look like this:

\n\n
\n\n\n\n\n\n\n\n\n    \n\n    \n\n
\n\n

The second one contains the tests for the frontend, for example for the ColorPicker component:

\n\n
import { test, describe, assert, afterEach } from \"vitest\";\nimport { cleanup, render } from \"@gradio/tootils\";\n\nimport ColorPicker from \"./ColorPicker.svelte\";\nimport type { LoadingStatus } from \"@gradio/statustracker/types\";\n\nconst loading_status = {\n    eta: 0,\n    queue_position: 1,\n    status: \"complete\" as LoadingStatus[\"status\"],\n    scroll_to_output: false,\n    visible: true,\n    fn_index: 0\n};\n\ndescribe(\"ColorPicker\", () => {\n    afterEach(() => cleanup());\n\n    test(\"renders provided value\", () => {\n        const { getByDisplayValue } = render(ColorPicker, {\n            loading_status,\n            show_label: true,\n            mode: \"dynamic\",\n            value: \"#000000\",\n            label: \"ColorPicker\"\n        });\n\n        const item: HTMLInputElement = getByDisplayValue(\"#000000\");\n        assert.equal(item.value, \"#000000\");\n    });\n\n    test(\"changing the color should update the value\", async () => {\n        const { component, getByDisplayValue } = render(ColorPicker, {\n            loading_status,\n            show_label: true,\n            mode: \"dynamic\",\n            value: \"#000000\",\n            label: \"ColorPicker\"\n        });\n\n        const item: HTMLInputElement = getByDisplayValue(\"#000000\");\n\n        assert.equal(item.value, \"#000000\");\n\n        await component.$set({\n            value: \"#FFFFFF\"\n        });\n\n        assert.equal(component.value, \"#FFFFFF\");\n    });\n});\n
\n\n

The third one is the index.ts file:

\n\n
export { default as Component } from \"./ColorPicker.svelte\";\nexport const modes = [\"static\", \"dynamic\"];\n
\n\n
    \n
  • Add the mapping for your component in the directory.ts file. To do this, copy and paste the mapping line of any component and edit its text. The key name must be the lowercase version of the actual component name in the Python library. So for example, for the ColorPicker component the mapping looks like this:
  • \n
\n\n
export const component_map = {\n...\ncolorpicker: () => import(\"./ColorPicker\"),\n...\n}\n
\n\n

2.1 Writing Unit Test for Svelte Component

\n\n

When developing new components, you should also write a suite of unit tests for it. The tests should be placed in the new component's folder in a file named MyAwesomeComponent.test.ts. Again, as above, take a cue from the tests of other components (e.g. Textbox.test.ts) and add as many unit tests as you think are appropriate to test all the different aspects and functionalities of the new component.

\n\n

3. Create a New Demo

\n\n

The last step is to create a demo in the gradio/demo folder, which will use the newly added component. Again, the suggestion is to reference an existing demo. Write the code for the demo in a file called run.py, add the necessary requirements and an image showing the application interface. Finally, add a gif showing its usage.\nYou can take a look at the demo created for the ColorPicker, where an icon and a color selected through the new component are taken as input, and the same icon colored with the selected color is returned as output.

\n\n
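
As a rough illustration only (this is a minimal hypothetical sketch, not the code of the actual ColorPicker demo, whose run.py recolors an uploaded icon), a run.py that exercises a new component might look something like this:

\n\n
import gradio as gr\n\ndef echo_color(color):\n    # Minimal \"prediction\" function: simply pass the selected color through to the output\n    return color\n\nwith gr.Blocks() as demo:\n    picker = gr.ColorPicker(label=\"Pick a color\")\n    output = gr.ColorPicker(label=\"Selected color\")\n    picker.change(fn=echo_color, inputs=picker, outputs=output)\n\nif __name__ == \"__main__\":\n    demo.launch()\n
\n\n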

To test the application:

\n\n
    \n
  • in a terminal, run python path/demo/run.py, which starts the backend at http://localhost:7860;
  • \n
  • in another terminal, run pnpm dev to start the frontend at http://localhost:9876 with hot reload functionalities.
  • \n
\n\n

Conclusion

\n\n

In this guide, we have shown how simple it is to add a new component to Gradio, seeing step by step how the ColorPicker component was added. For further details, you can refer to PR: #1695.

\n", "tags": [], "spaces": [], "url": "/guides/creating-a-new-component/", "contributor": null}, {"name": "custom-interpretations-with-blocks", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 36, "pretty_name": "Custom Interpretations With Blocks", "content": "# Custom Machine Learning Interpretations with Blocks\n\n\n**Prerequisite**: This Guide requires you to know about Blocks and the interpretation feature of Interfaces.\nMake sure to [read the Guide to Blocks first](https://gradio.app/guides/quickstart/#blocks-more-flexibility-and-control) as well as the\ninterpretation section of the [Advanced Interface Features Guide](/advanced-interface-features#interpreting-your-predictions).\n\n## Introduction\n\nIf you have experience working with the Interface class, then you know that interpreting the prediction of your machine learning model\nis as easy as setting the `interpretation` parameter to either \"default\" or \"shap\".\n\nYou may be wondering if it is possible to add the same interpretation functionality to an app built with the Blocks API.\nNot only is it possible, but the flexibility of Blocks lets you display the interpretation output in ways that are\nimpossible to do with Interfaces!\n\nThis guide will show how to:\n\n1. Recreate the behavior of Interfaces's interpretation feature in a Blocks app.\n2. Customize how interpretations are displayed in a Blocks app.\n\nLet's get started!\n\n## Setting up the Blocks app\n\nLet's build a sentiment classification app with the Blocks API.\nThis app will take text as input and output the probability that this text expresses either negative or positive sentiment.\nWe'll have a single input `Textbox` and a single output `Label` component.\nBelow is the code for the app as well as the app itself.\n\n```python\nimport gradio as gr \nfrom transformers import pipeline\n\nsentiment_classifier = pipeline(\"text-classification\", return_all_scores=True)\n\ndef classifier(text):\n pred = sentiment_classifier(text)\n return {p[\"label\"]: p[\"score\"] for p in pred[0]}\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n input_text = gr.Textbox(label=\"Input Text\")\n with gr.Row():\n classify = gr.Button(\"Classify Sentiment\")\n with gr.Column():\n label = gr.Label(label=\"Predicted Sentiment\")\n\n classify.click(classifier, input_text, label)\ndemo.launch()\n```\n\n \n\n## Adding interpretations to the app\n\nOur goal is to present to our users how the words in the input contribute to the model's prediction.\nThis will help our users understand how the model works and also evaluate its effectiveness.\nFor example, we should expect our model to identify the words \"happy\" and \"love\" with positive sentiment - if not it's a sign we made a mistake in training it!\n\nFor each word in the input, we will compute a score of how much the model's prediction of positive sentiment is changed by that word.\nOnce we have those `(word, score)` pairs we can use gradio to visualize them for the user.\n\nThe [shap](https://shap.readthedocs.io/en/stable/index.html) library will help us compute the `(word, score)` pairs and\ngradio will take care of displaying the output to the user.\n\nThe following code computes the `(word, score)` pairs:\n\n```python\ndef interpretation_function(text):\n explainer = shap.Explainer(sentiment_classifier)\n shap_values = explainer([text])\n \n # Dimensions are (batch size, text size, number of classes)\n # Since we care about positive sentiment, use 
index 1\n scores = list(zip(shap_values.data[0], shap_values.values[0, :, 1]))\n # Scores contains (word, score) pairs\n \n \n # Format expected by gr.components.Interpretation\n return {\"original\": text, \"interpretation\": scores}\n```\n\nNow, all we have to do is add a button that runs this function when clicked.\nTo display the interpretation, we will use `gr.components.Interpretation`.\nThis will color each word in the input either red or blue.\nRed if it contributes to positive sentiment and blue if it contributes to negative sentiment.\nThis is how `Interface` displays the interpretation output for text.\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n input_text = gr.Textbox(label=\"Input Text\")\n with gr.Row():\n classify = gr.Button(\"Classify Sentiment\")\n interpret = gr.Button(\"Interpret\")\n with gr.Column():\n label = gr.Label(label=\"Predicted Sentiment\")\n with gr.Column():\n interpretation = gr.components.Interpretation(input_text)\n classify.click(classifier, input_text, label)\n interpret.click(interpretation_function, input_text, interpretation)\n\ndemo.launch()\n```\n\n \n\n\n## Customizing how the interpretation is displayed\n\nThe `gr.components.Interpretation` component does a good job of showing how individual words contribute to the sentiment prediction,\nbut what if we also wanted to display the score themselves along with the words?\n\nOne way to do this would be to generate a bar plot where the words are on the horizontal axis and the bar height corresponds\nto the shap score.\n\nWe can do this by modifying our `interpretation_function` to additionally return a matplotlib bar plot.\nWe will display it with the `gr.Plot` component in a separate tab.\n\nThis is how the interpretation function will look:\n```python\ndef interpretation_function(text):\n explainer = shap.Explainer(sentiment_classifier)\n shap_values = explainer([text])\n # Dimensions are (batch size, text size, number of classes)\n # Since we care about positive sentiment, use index 1\n scores = list(zip(shap_values.data[0], shap_values.values[0, :, 1]))\n\n scores_desc = sorted(scores, key=lambda t: t[1])[::-1]\n\n # Filter out empty string added by shap\n scores_desc = [t for t in scores_desc if t[0] != \"\"]\n\n fig_m = plt.figure()\n \n # Select top 5 words that contribute to positive sentiment\n plt.bar(x=[s[0] for s in scores_desc[:5]],\n height=[s[1] for s in scores_desc[:5]])\n plt.title(\"Top words contributing to positive sentiment\")\n plt.ylabel(\"Shap Value\")\n plt.xlabel(\"Word\")\n return {\"original\": text, \"interpretation\": scores}, fig_m\n```\n\nAnd this is how the app code will look:\n```python\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n input_text = gr.Textbox(label=\"Input Text\")\n with gr.Row():\n classify = gr.Button(\"Classify Sentiment\")\n interpret = gr.Button(\"Interpret\")\n with gr.Column():\n label = gr.Label(label=\"Predicted Sentiment\")\n with gr.Column():\n with gr.Tabs():\n with gr.TabItem(\"Display interpretation with built-in component\"):\n interpretation = gr.components.Interpretation(input_text)\n with gr.TabItem(\"Display interpretation with plot\"):\n interpretation_plot = gr.Plot()\n\n classify.click(classifier, input_text, label)\n interpret.click(interpretation_function, input_text, [interpretation, interpretation_plot])\n\ndemo.launch()\n```\n\nYou can see the demo below!\n\n \n\n## Beyond Sentiment Classification\nAlthough we have focused on sentiment classification so far, you can add 
interpretations to almost any machine learning model.\nThe output must be an `gr.Image` or `gr.Label` but the input can be almost anything (`gr.Number`, `gr.Slider`, `gr.Radio`, `gr.Image`).\n\nHere is a demo built with blocks of interpretations for an image classification model:\n\n \n\n\n## Closing remarks\n\nWe did a deep dive \ud83e\udd3f into how interpretations work and how you can add them to your Blocks app.\n\nWe also showed how the Blocks API gives you the power to control how the interpretation is visualized in your app.\n\nAdding interpretations is a helpful way to make your users understand and gain trust in your model.\nNow you have all the tools you need to add them to all of your apps!\n", "html": "

Custom Machine Learning Interpretations with Blocks

\n\n

Prerequisite: This Guide requires you to know about Blocks and the interpretation feature of Interfaces.\nMake sure to read the Guide to Blocks first as well as the\ninterpretation section of the Advanced Interface Features Guide.

\n\n

Introduction

\n\n

If you have experience working with the Interface class, then you know that interpreting the prediction of your machine learning model\nis as easy as setting the interpretation parameter to either \"default\" or \"shap\".

\n\n
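
For reference, here is a minimal sketch (not code from this guide) of what that looks like with an Interface:

\n\n
import gradio as gr\nfrom transformers import pipeline\n\nsentiment_classifier = pipeline(\"text-classification\", return_all_scores=True)\n\ndef classifier(text):\n    pred = sentiment_classifier(text)\n    return {p[\"label\"]: p[\"score\"] for p in pred[0]}\n\n# interpretation can be \"default\" (built-in) or \"shap\" (uses the shap library)\ndemo = gr.Interface(fn=classifier, inputs=\"text\", outputs=\"label\", interpretation=\"default\")\ndemo.launch()\n
\n\n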

You may be wondering if it is possible to add the same interpretation functionality to an app built with the Blocks API.\nNot only is it possible, but the flexibility of Blocks lets you display the interpretation output in ways that are\nimpossible to do with Interfaces!

\n\n

This guide will show how to:

\n\n
    \n
  1. Recreate the behavior of the interpretation feature of Interfaces in a Blocks app.
  2. \n
  3. Customize how interpretations are displayed in a Blocks app.
  4. \n
\n\n

Let's get started!

\n\n

Setting up the Blocks app

\n\n

Let's build a sentiment classification app with the Blocks API.\nThis app will take text as input and output the probability that this text expresses either negative or positive sentiment.\nWe'll have a single input Textbox and a single output Label component.\nBelow is the code for the app as well as the app itself.

\n\n
import gradio as gr \nfrom transformers import pipeline\n\nsentiment_classifier = pipeline(\"text-classification\", return_all_scores=True)\n\ndef classifier(text):\n    pred = sentiment_classifier(text)\n    return {p[\"label\"]: p[\"score\"] for p in pred[0]}\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        with gr.Column():\n            input_text = gr.Textbox(label=\"Input Text\")\n            with gr.Row():\n                classify = gr.Button(\"Classify Sentiment\")\n        with gr.Column():\n            label = gr.Label(label=\"Predicted Sentiment\")\n\n    classify.click(classifier, input_text, label)\ndemo.launch()\n
\n\n

\n\n

Adding interpretations to the app

\n\n

Our goal is to present to our users how the words in the input contribute to the model's prediction.\nThis will help our users understand how the model works and also evaluate its effectiveness.\nFor example, we should expect our model to associate the words \"happy\" and \"love\" with positive sentiment - if not, it's a sign we made a mistake in training it!

\n\n

For each word in the input, we will compute a score of how much the model's prediction of positive sentiment is changed by that word.\nOnce we have those (word, score) pairs we can use gradio to visualize them for the user.

\n\n

The shap library will help us compute the (word, score) pairs and\ngradio will take care of displaying the output to the user.

\n\n

The following code computes the (word, score) pairs:

\n\n
def interpretation_function(text):\n    explainer = shap.Explainer(sentiment_classifier)\n    shap_values = explainer([text])\n\n    # Dimensions are (batch size, text size, number of classes)\n    # Since we care about positive sentiment, use index 1\n    scores = list(zip(shap_values.data[0], shap_values.values[0, :, 1]))\n    # Scores contains (word, score) pairs\n\n\n    # Format expected by gr.components.Interpretation\n    return {\"original\": text, \"interpretation\": scores}\n
\n\n

Now, all we have to do is add a button that runs this function when clicked.\nTo display the interpretation, we will use gr.components.Interpretation.\nThis will color each word in the input either red or blue.\nRed if it contributes to positive sentiment and blue if it contributes to negative sentiment.\nThis is how Interface displays the interpretation output for text.

\n\n
with gr.Blocks() as demo:\n    with gr.Row():\n        with gr.Column():\n            input_text = gr.Textbox(label=\"Input Text\")\n            with gr.Row():\n                classify = gr.Button(\"Classify Sentiment\")\n                interpret = gr.Button(\"Interpret\")\n        with gr.Column():\n            label = gr.Label(label=\"Predicted Sentiment\")\n        with gr.Column():\n            interpretation = gr.components.Interpretation(input_text)\n    classify.click(classifier, input_text, label)\n    interpret.click(interpretation_function, input_text, interpretation)\n\ndemo.launch()\n
\n\n

\n\n

Customizing how the interpretation is displayed

\n\n

The gr.components.Interpretation component does a good job of showing how individual words contribute to the sentiment prediction,\nbut what if we also wanted to display the scores themselves along with the words?

\n\n

One way to do this would be to generate a bar plot where the words are on the horizontal axis and the bar height corresponds\nto the shap score.

\n\n

We can do this by modifying our interpretation_function to additionally return a matplotlib bar plot.\nWe will display it with the gr.Plot component in a separate tab.

\n\n

This is how the interpretation function will look:

\n\n
def interpretation_function(text):\n    explainer = shap.Explainer(sentiment_classifier)\n    shap_values = explainer([text])\n    # Dimensions are (batch size, text size, number of classes)\n    # Since we care about positive sentiment, use index 1\n    scores = list(zip(shap_values.data[0], shap_values.values[0, :, 1]))\n\n    scores_desc = sorted(scores, key=lambda t: t[1])[::-1]\n\n    # Filter out empty string added by shap\n    scores_desc = [t for t in scores_desc if t[0] != \"\"]\n\n    fig_m = plt.figure()\n\n    # Select top 5 words that contribute to positive sentiment\n    plt.bar(x=[s[0] for s in scores_desc[:5]],\n            height=[s[1] for s in scores_desc[:5]])\n    plt.title(\"Top words contributing to positive sentiment\")\n    plt.ylabel(\"Shap Value\")\n    plt.xlabel(\"Word\")\n    return {\"original\": text, \"interpretation\": scores}, fig_m\n
\n\n

And this is how the app code will look:

\n\n
with gr.Blocks() as demo:\n    with gr.Row():\n        with gr.Column():\n            input_text = gr.Textbox(label=\"Input Text\")\n            with gr.Row():\n                classify = gr.Button(\"Classify Sentiment\")\n                interpret = gr.Button(\"Interpret\")\n        with gr.Column():\n            label = gr.Label(label=\"Predicted Sentiment\")\n        with gr.Column():\n            with gr.Tabs():\n                with gr.TabItem(\"Display interpretation with built-in component\"):\n                    interpretation = gr.components.Interpretation(input_text)\n                with gr.TabItem(\"Display interpretation with plot\"):\n                    interpretation_plot = gr.Plot()\n\n    classify.click(classifier, input_text, label)\n    interpret.click(interpretation_function, input_text, [interpretation, interpretation_plot])\n\ndemo.launch()\n
\n\n

You can see the demo below!

\n\n

\n\n

Beyond Sentiment Classification

\n\n

Although we have focused on sentiment classification so far, you can add interpretations to almost any machine learning model.\nThe output must be a gr.Image or gr.Label, but the input can be almost anything (gr.Number, gr.Slider, gr.Radio, gr.Image).

\n\n

Here is a demo of interpretations for an image classification model, built with Blocks:

\n\n

\n\n

Closing remarks

\n\n

We did a deep dive \ud83e\udd3f into how interpretations work and how you can add them to your Blocks app.

\n\n

We also showed how the Blocks API gives you the power to control how the interpretation is visualized in your app.

\n\n

Adding interpretations is a great way to help your users understand and gain trust in your model.\nNow you have all the tools you need to add them to all of your apps!

\n", "tags": ["INTERPRETATION", "SENTIMENT ANALYSIS"], "spaces": [], "url": "/guides/custom-interpretations-with-blocks/", "contributor": null}, {"name": "developing-faster-with-reload-mode", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 37, "pretty_name": "Developing Faster With Reload Mode", "content": "# Developing Faster with Auto-Reloading\n\n**Prerequisite**: This Guide requires you to know about Blocks. Make sure to [read the Guide to Blocks first](https://gradio.app/guides/quickstart/#blocks-more-flexibility-and-control).\n\nThis guide covers auto reloading, reloading in a Python IDE, and using gradio with Jupyter Notebooks.\n\n## Why Auto-Reloading?\n\nWhen you are building a Gradio demo, particularly out of Blocks, you may find it cumbersome to keep re-running your code to test your changes.\n\nTo make it faster and more convenient to write your code, we've made it easier to \"reload\" your Gradio apps instantly when you are developing in a **Python IDE** (like VS Code, Sublime Text, PyCharm, or so on) or generally running your Python code from the terminal. We've also developed an analogous \"magic command\" that allows you to re-run cells faster if you use **Jupyter Notebooks** (or any similar environment like Colab).\n\nThis short Guide will cover both of these methods, so no matter how you write Python, you'll leave knowing how to build Gradio apps faster.\n\n## Python IDE Reload \ud83d\udd25\n\nIf you are building Gradio Blocks using a Python IDE, your file of code (let's name it `run.py`) might look something like this: \n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as demo:\n gr.Markdown(\"# Greetings from Gradio!\")\n inp = gr.Textbox(placeholder=\"What is your name?\")\n out = gr.Textbox()\n\n inp.change(fn=lambda x: f\"Welcome, {x}!\", \n inputs=inp, \n outputs=out)\n\nif __name__ == \"__main__\":\n demo.launch() \n```\n\nThe problem is that anytime that you want to make a change to your layout, events, or components, you have to close and rerun your app by writing `python run.py`.\n\nInstead of doing this, you can run your code in **reload mode** by changing 1 word: `python` to `gradio`:\n\nIn the terminal, run `gradio run.py`. That's it! \n\nNow, you'll see that after you'll see something like this:\n\n```bash\nLaunching in *reload mode* on: http://127.0.0.1:7860 (Press CTRL+C to quit)\n\nWatching...\n\nWARNING: The --reload flag should not be used in production on Windows.\n```\n\nThe important part here is the line that says `Watching...` What's happening here is that Gradio will be observing the directory where `run.py` file lives, and if the file changes, it will automatically rerun the file for you. So you can focus on writing your code, and your Gradio demo will refresh automatically \ud83e\udd73\n\n\u26a0\ufe0f Warning: the `gradio` command does not detect the parameters passed to the `launch()` methods because the `launch()` method is never called in reload mode. For example, setting `auth`, or `show_error` in `launch()` will not be reflected in the app.\n\nThere is one important thing to keep in mind when using the reload mode: Gradio specifically looks for a Gradio Blocks/Interface demo called `demo` in your code. If you have named your demo something else, you will need to pass in the name of your demo's FastAPI app as the 2nd parameter in your code. For Gradio demos, the FastAPI app can be accessed using the `.app` attribute. 
So if your `run.py` file looked like this:\n\n```python\nimport gradio as gr\n\nwith gr.Blocks() as my_demo:\n gr.Markdown(\"# Greetings from Gradio!\")\n inp = gr.Textbox(placeholder=\"What is your name?\")\n out = gr.Textbox()\n\n inp.change(fn=lambda x: f\"Welcome, {x}!\", \n inputs=inp, \n outputs=out)\n\nif __name__ == \"__main__\":\n my_demo.launch() \n```\n\nThen you would launch it in reload mode like this: `gradio run.py my_demo.app`. \n\n\ud83d\udd25 If your application accepts command line arguments, you can pass them in as well. Here's an example:\n\n```python\nimport gradio as gr\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--name\", type=str, default=\"User\")\nargs, unknown = parser.parse_known_args()\n\nwith gr.Blocks() as demo:\n gr.Markdown(f\"# Greetings {args.name}!\")\n inp = gr.Textbox()\n out = gr.Textbox()\n\n inp.change(fn=lambda x: x, inputs=inp, outputs=out)\n\nif __name__ == \"__main__\":\n demo.launch()\n```\n\nWhich you could run like this: `gradio run.py --name Gretel`\n\nAs a small aside, this auto-reloading happens if you change your `run.py` source code or the Gradio source code. Meaning that this can be useful if you decide to [contribute to Gradio itself](https://github.com/gradio-app/gradio/blob/main/CONTRIBUTING.md) \u2705\n\n## Jupyter Notebook Magic \ud83d\udd2e\n\nWhat about if you use Jupyter Notebooks (or Colab Notebooks, etc.) to develop code? We got something for you too!\n\nWe've developed a **magic command** that will create and run a Blocks demo for you. To use this, load the gradio extension at the top of your notebook: \n\n`%load_ext gradio`\n\nThen, in the cell that you are developing your Gradio demo, simply write the magic command **`%%blocks`** at the top, and then write the layout and components like you would normally:\n\n```py\n%%blocks \n\nimport gradio as gr\n\ngr.Markdown(\"# Greetings from Gradio!\")\ninp = gr.Textbox(placeholder=\"What is your name?\")\nout = gr.Textbox()\n\ninp.change(fn=lambda x: f\"Welcome, {x}!\", \n inputs=inp, \n outputs=out)\n```\n\nNotice that:\n\n* You do not need to put the boiler plate `with gr.Blocks() as demo:` and `demo.launch()` code \u2014 Gradio does that for you automatically!\n\n* Every time you rerun the cell, Gradio will re-launch your app on the same port and using the same underlying web server. This means you'll see your changes *much, much faster* than if you were rerunning the cell normally. \n\nHere's what it looks like in a jupyter notebook:\n\n![](https://i.ibb.co/nrszFws/Blocks.gif)\n\n\ud83e\ude84 This works in colab notebooks too! [Here's a colab notebook](https://colab.research.google.com/drive/1jUlX1w7JqckRHVE-nbDyMPyZ7fYD8488?authuser=1#scrollTo=zxHYjbCTTz_5) where you can see the Blocks magic in action. Try making some changes and re-running the cell with the Gradio code! \n\nThe Notebook Magic is now the author's preferred way of building Gradio demos. Regardless of how you write Python code, we hope either of these methods will give you a much better development experience using Gradio. \n\n--------\n\n## Next Steps\n\nNow that you know how to develop quickly using Gradio, start building your own! \n\nIf you are looking for inspiration, try exploring demos other people have built with Gradio, [browse public Hugging Face Spaces](http://hf.space/) \ud83e\udd17\n\n", "html": "

Developing Faster with Auto-Reloading

\n\n

Prerequisite: This Guide requires you to know about Blocks. Make sure to read the Guide to Blocks first.

\n\n

This guide covers auto reloading, reloading in a Python IDE, and using gradio with Jupyter Notebooks.

\n\n

Why Auto-Reloading?

\n\n

When you are building a Gradio demo, particularly out of Blocks, you may find it cumbersome to keep re-running your code to test your changes.

\n\n

To make it faster and more convenient to write your code, we've made it easier to \"reload\" your Gradio apps instantly when you are developing in a Python IDE (like VS Code, Sublime Text, PyCharm, or so on) or generally running your Python code from the terminal. We've also developed an analogous \"magic command\" that allows you to re-run cells faster if you use Jupyter Notebooks (or any similar environment like Colab).

\n\n

This short Guide will cover both of these methods, so no matter how you write Python, you'll leave knowing how to build Gradio apps faster.

\n\n

Python IDE Reload \ud83d\udd25

\n\n

If you are building Gradio Blocks using a Python IDE, your file of code (let's name it run.py) might look something like this:

\n\n
import gradio as gr\n\nwith gr.Blocks() as demo:\n    gr.Markdown(\"# Greetings from Gradio!\")\n    inp = gr.Textbox(placeholder=\"What is your name?\")\n    out = gr.Textbox()\n\n    inp.change(fn=lambda x: f\"Welcome, {x}!\", \n               inputs=inp, \n               outputs=out)\n\nif __name__ == \"__main__\":\n    demo.launch()    \n
\n\n

The problem is that anytime that you want to make a change to your layout, events, or components, you have to close and rerun your app by writing python run.py.

\n\n

Instead of doing this, you can run your code in reload mode by changing 1 word: python to gradio:

\n\n

In the terminal, run gradio run.py. That's it!

\n\n

Now, you'll see something like this:

\n\n
Launching in *reload mode* on: http://127.0.0.1:7860 (Press CTRL+C to quit)\n\nWatching...\n\nWARNING:  The --reload flag should not be used in production on Windows.\n
\n\n

The important part here is the line that says Watching... What's happening here is that Gradio will be observing the directory where the run.py file lives, and if the file changes, it will automatically rerun the file for you. So you can focus on writing your code, and your Gradio demo will refresh automatically \ud83e\udd73

\n\n

\u26a0\ufe0f Warning: the gradio command does not detect the parameters passed to the launch() methods because the launch() method is never called in reload mode. For example, setting auth, or show_error in launch() will not be reflected in the app.

\n\n

There is one important thing to keep in mind when using reload mode: Gradio specifically looks for a Gradio Blocks/Interface demo called demo in your code. If you have named your demo something else, you will need to pass the name of your demo's FastAPI app as the 2nd argument to the gradio command. For Gradio demos, the FastAPI app can be accessed using the .app attribute. So if your run.py file looked like this:

\n\n
import gradio as gr\n\nwith gr.Blocks() as my_demo:\n    gr.Markdown(\"# Greetings from Gradio!\")\n    inp = gr.Textbox(placeholder=\"What is your name?\")\n    out = gr.Textbox()\n\n    inp.change(fn=lambda x: f\"Welcome, {x}!\", \n               inputs=inp, \n               outputs=out)\n\nif __name__ == \"__main__\":\n    my_demo.launch()    \n
\n\n

Then you would launch it in reload mode like this: gradio run.py my_demo.app.

\n\n

\ud83d\udd25 If your application accepts command line arguments, you can pass them in as well. Here's an example:

\n\n
import gradio as gr\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--name\", type=str, default=\"User\")\nargs, unknown = parser.parse_known_args()\n\nwith gr.Blocks() as demo:\n    gr.Markdown(f\"# Greetings {args.name}!\")\n    inp = gr.Textbox()\n    out = gr.Textbox()\n\n    inp.change(fn=lambda x: x, inputs=inp, outputs=out)\n\nif __name__ == \"__main__\":\n    demo.launch()\n
\n\n

Which you could run like this: gradio run.py --name Gretel

\n\n

As a small aside, this auto-reloading happens if you change your run.py source code or the Gradio source code, which means it can be useful if you decide to contribute to Gradio itself \u2705

\n\n

Jupyter Notebook Magic \ud83d\udd2e

\n\n

What if you use Jupyter Notebooks (or Colab Notebooks, etc.) to develop code? We've got something for you too!

\n\n

We've developed a magic command that will create and run a Blocks demo for you. To use this, load the gradio extension at the top of your notebook:

\n\n

%load_ext gradio

\n\n

Then, in the cell that you are developing your Gradio demo, simply write the magic command %%blocks at the top, and then write the layout and components like you would normally:

\n\n
%%blocks \n\nimport gradio as gr\n\ngr.Markdown(\"# Greetings from Gradio!\")\ninp = gr.Textbox(placeholder=\"What is your name?\")\nout = gr.Textbox()\n\ninp.change(fn=lambda x: f\"Welcome, {x}!\", \n           inputs=inp, \n           outputs=out)\n
\n\n

Notice that:

\n\n
    \n
  • You do not need to put the boilerplate with gr.Blocks() as demo: and demo.launch() code \u2014 Gradio does that for you automatically!

  • \n
  • Every time you rerun the cell, Gradio will re-launch your app on the same port and using the same underlying web server. This means you'll see your changes much, much faster than if you were rerunning the cell normally.

  • \n
\n\n

Here's what it looks like in a Jupyter notebook:

\n\n

\"\"

\n\n

\ud83e\ude84 This works in colab notebooks too! Here's a colab notebook where you can see the Blocks magic in action. Try making some changes and re-running the cell with the Gradio code!

\n\n

The Notebook Magic is now the author's preferred way of building Gradio demos. Regardless of how you write Python code, we hope either of these methods will give you a much better development experience using Gradio.

\n\n
\n\n

Next Steps

\n\n

Now that you know how to develop quickly using Gradio, start building your own!

\n\n

If you are looking for inspiration, try exploring demos other people have built with Gradio by browsing public Hugging Face Spaces \ud83e\udd17

\n", "tags": [], "spaces": [], "url": "/guides/developing-faster-with-reload-mode/", "contributor": null}, {"name": "how-to-use-3D-model-component", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 38, "pretty_name": "How To Use 3D Model Component", "content": "# How to Use the 3D Model Component\n\n\n\n\n## Introduction\n\n3D models are becoming more popular in machine learning and make for some of the most fun demos to experiment with. Using `gradio`, you can easily build a demo of your 3D image model and share it with anyone. The Gradio 3D Model component accepts 3 file types including: *.obj*, *.glb*, & *.gltf*.\n\nThis guide will show you how to build a demo for your 3D image model in a few lines of code; like the one below. Play around with 3D object by clicking around, dragging and zooming:\n\n \n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](https://gradio.app/guides/quickstart).\n\n\n## Taking a Look at the Code\n\nLet's take a look at how to create the minimal interface above. The prediction function in this case will just return the original 3D model mesh, but you can change this function to run inference on your machine learning model. We'll take a look at more complex examples below.\n\n```python\nimport gradio as gr\n\ndef load_mesh(mesh_file_name):\n return mesh_file_name\n\ndemo = gr.Interface(\n fn=load_mesh,\n inputs=gr.Model3D(),\n outputs=gr.Model3D(clear_color=[0.0, 0.0, 0.0, 0.0], label=\"3D Model\"),\n examples=[\n [\"files/Bunny.obj\"],\n [\"files/Duck.glb\"],\n [\"files/Fox.gltf\"],\n [\"files/face.obj\"],\n ],\n cache_examples=True,\n)\n\ndemo.launch()\n```\n\nLet's break down the code above:\n\n`load_mesh`: This is our 'prediction' function and for simplicity, this function will take in the 3D model mesh and return it.\n\nCreating the Interface:\n\n* `fn`: the prediction function that is used when the user clicks submit. In our case this is the `load_mesh` function.\n* `inputs`: create a model3D input component. The input expects an uploaded file as a {str} filepath.\n* `outputs`: create a model3D output component. The output component also expects a file as a {str} filepath.\n * `clear_color`: this is the background color of the 3D model canvas. Expects RGBa values.\n * `label`: the label that appears on the top left of the component.\n* `examples`: list of 3D model files. The 3D model component can accept *.obj*, *.glb*, & *.gltf* file types.\n* `cache_examples`: saves the predicted output for the examples, to save time on inference.\n\n\n## Exploring mode complex Model3D Demos:\n\nBelow is a demo that uses the DPT model to predict the depth of an image and then uses 3D Point Cloud to create a 3D object. Take a look at the [app.py](https://huggingface.co/spaces/radames/dpt-depth-estimation-3d-obj/blob/main/app.py) file for a peek into the code and the model prediction function.\n \n\nBelow is a demo that uses the PIFu model to convert an image of a clothed human into a 3D digitized model. Take a look at the [spaces.py](https://huggingface.co/spaces/radames/PIFu-Clothed-Human-Digitization/blob/main/PIFu/spaces.py) file for a peek into the code and the model prediction function.\n\n \n\n----------\n\nAnd you're done! That's all the code you need to build an interface for your Model3D model. 
Here are some references that you may find useful:\n\n* Gradio's [\"Getting Started\" guide](https://gradio.app/getting_started/)\n* The first [3D Model Demo](https://huggingface.co/spaces/dawood/Model3D) and [complete code](https://huggingface.co/spaces/dawood/Model3D/tree/main) (on Hugging Face Spaces)\n", "html": "

How to Use the 3D Model Component

\n\n

Introduction

\n\n

3D models are becoming more popular in machine learning and make for some of the most fun demos to experiment with. Using gradio, you can easily build a demo of your 3D image model and share it with anyone. The Gradio 3D Model component accepts 3 file types: .obj, .glb, & .gltf.

\n\n

This guide will show you how to build a demo for your 3D image model in a few lines of code, like the one below. Play around with the 3D object by clicking, dragging, and zooming:

\n\n

\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed.

\n\n

Taking a Look at the Code

\n\n

Let's take a look at how to create the minimal interface above. The prediction function in this case will just return the original 3D model mesh, but you can change this function to run inference on your machine learning model. We'll take a look at more complex examples below.

\n\n
import gradio as gr\n\ndef load_mesh(mesh_file_name):\n    return mesh_file_name\n\ndemo = gr.Interface(\n    fn=load_mesh,\n    inputs=gr.Model3D(),\n    outputs=gr.Model3D(clear_color=[0.0, 0.0, 0.0, 0.0],  label=\"3D Model\"),\n    examples=[\n        [\"files/Bunny.obj\"],\n        [\"files/Duck.glb\"],\n        [\"files/Fox.gltf\"],\n        [\"files/face.obj\"],\n    ],\n    cache_examples=True,\n)\n\ndemo.launch()\n
\n\n

Let's break down the code above:

\n\n

load_mesh: This is our 'prediction' function and for simplicity, this function will take in the 3D model mesh and return it.

\n\n

Creating the Interface:

\n\n
    \n
  • fn: the prediction function that is used when the user clicks submit. In our case this is the load_mesh function.
  • \n
  • inputs: create a model3D input component. The input expects an uploaded file as a {str} filepath.
  • \n
  • outputs: create a model3D output component. The output component also expects a file as a {str} filepath.\n
      \n
    • clear_color: this is the background color of the 3D model canvas. Expects RGBa values.
    • \n
    • label: the label that appears on the top left of the component.
    • \n
  • \n
  • examples: list of 3D model files. The 3D model component can accept .obj, .glb, & .gltf file types.
  • \n
  • cache_examples: saves the predicted output for the examples, to save time on inference.
  • \n
\n\n

Exploring more complex Model3D Demos:

\n\n

Below is a demo that uses the DPT model to predict the depth of an image and then uses 3D Point Cloud to create a 3D object. Take a look at the app.py file for a peek into the code and the model prediction function.\n

\n\n

Below is a demo that uses the PIFu model to convert an image of a clothed human into a 3D digitized model. Take a look at the spaces.py file for a peek into the code and the model prediction function.

\n\n

\n\n
\n\n

And you're done! That's all the code you need to build an interface for your Model3D model. Here are some references that you may find useful:

\n\n\n", "tags": ["VISION", "IMAGE"], "spaces": ["https://huggingface.co/spaces/dawood/Model3D", "https://huggingface.co/spaces/radames/PIFu-Clothed-Human-Digitization", "https://huggingface.co/spaces/radames/dpt-depth-estimation-3d-obj"], "url": "/guides/how-to-use-3D-model-component/", "contributor": null}, {"name": "named-entity-recognition", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 39, "pretty_name": "Named Entity Recognition", "content": "# Named-Entity Recognition \n\n\n\n\n## Introduction\n\nNamed-entity recognition (NER), also known as token classification or text tagging, is the task of taking a sentence and classifying every word (or \"token\") into different categories, such as names of people or names of locations, or different parts of speech. \n\nFor example, given the sentence:\n\n> Does Chicago have any Pakistani restaurants?\n\nA named-entity recognition algorithm may identify:\n\n* \"Chicago\" as a **location**\n* \"Pakistani\" as an **ethnicity** \n\n\nand so on. \n\nUsing `gradio` (specifically the `HighlightedText` component), you can easily build a web demo of your NER model and share that with the rest of your team.\n\nHere is an example of a demo that you'll be able to build:\n\n\n\nThis tutorial will show how to take a pretrained NER model and deploy it with a Gradio interface. We will show two different ways to use the `HighlightedText` component -- depending on your NER model, either of these two ways may be easier to learn! \n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). You will also need a pretrained named-entity recognition model. You can use your own, while in this tutorial, we will use one from the `transformers` library.\n\n### Approach 1: List of Entity Dictionaries\n\nMany named-entity recognition models output a list of dictionaries. Each dictionary consists of an *entity*, a \"start\" index, and an \"end\" index. This is, for example, how NER models in the `transformers` library operate:\n\n```py\nfrom transformers import pipeline \nner_pipeline = pipeline(\"ner\")\nner_pipeline(\"Does Chicago have any Pakistani restaurants\")\n```\n\nOutput:\n\n```bash\n[{'entity': 'I-LOC',\n 'score': 0.9988978,\n 'index': 2,\n 'word': 'Chicago',\n 'start': 5,\n 'end': 12},\n {'entity': 'I-MISC',\n 'score': 0.9958592,\n 'index': 5,\n 'word': 'Pakistani',\n 'start': 22,\n 'end': 31}]\n```\n\nIf you have such a model, it is very easy to hook it up to Gradio's `HighlightedText` component. All you need to do is pass in this **list of entities**, along with the **original text** to the model, together as dictionary, with the keys being `\"entities\"` and `\"text\"` respectively.\n\nHere is a complete example:\n\n```python\nfrom transformers import pipeline\n\nimport gradio as gr\n\nner_pipeline = pipeline(\"ner\")\n\nexamples = [\n \"Does Chicago have any stores and does Joe live here?\",\n]\n\ndef ner(text):\n output = ner_pipeline(text)\n return {\"text\": text, \"entities\": output} \n\ndemo = gr.Interface(ner,\n gr.Textbox(placeholder=\"Enter sentence here...\"), \n gr.HighlightedText(),\n examples=examples)\n\ndemo.launch()\n\n```\n\n\n### Approach 2: List of Tuples\n\nAn alternative way to pass data into the `HighlightedText` component is a list of tuples. The first element of each tuple should be the word or words that are being classified into a particular entity. 
The second element should be the entity label (or `None` if they should be unlabeled). The `HighlightedText` component automatically strings together the words and labels to display the entities.\n\nIn some cases, this can be easier than the first approach. Here is a demo showing this approach using Spacy's parts-of-speech tagger:\n\n```python\nimport gradio as gr\nimport os\nos.system('python -m spacy download en_core_web_sm')\nimport spacy\nfrom spacy import displacy\n\nnlp = spacy.load(\"en_core_web_sm\")\n\ndef text_analysis(text):\n doc = nlp(text)\n html = displacy.render(doc, style=\"dep\", page=True)\n html = (\n \"
\"\n + html\n + \"
\"\n )\n pos_count = {\n \"char_count\": len(text),\n \"token_count\": 0,\n }\n pos_tokens = []\n\n for token in doc:\n pos_tokens.extend([(token.text, token.pos_), (\" \", None)])\n\n return pos_tokens, pos_count, html\n\ndemo = gr.Interface(\n text_analysis,\n gr.Textbox(placeholder=\"Enter sentence here...\"),\n [\"highlight\", \"json\", \"html\"],\n examples=[\n [\"What a beautiful morning for a walk!\"],\n [\"It was the best of times, it was the worst of times.\"],\n ],\n)\n\ndemo.launch()\n\n```\n\n\n\n--------------------------------------------\n\n\nAnd you're done! That's all you need to know to build a web-based GUI for your NER model. \n\nFun tip: you can share your NER demo instantly with others simply by setting `share=True` in `launch()`. \n\n\n", "html": "

Named-Entity Recognition

\n\n

Introduction

\n\n

Named-entity recognition (NER), also known as token classification or text tagging, is the task of taking a sentence and classifying every word (or \"token\") into different categories, such as names of people or names of locations, or different parts of speech.

\n\n

For example, given the sentence:

\n\n
\n

Does Chicago have any Pakistani restaurants?

\n
\n\n

A named-entity recognition algorithm may identify:

\n\n
    \n
  • \"Chicago\" as a location
  • \n
  • \"Pakistani\" as an ethnicity
  • \n
\n\n

and so on.

\n\n

Using gradio (specifically the HighlightedText component), you can easily build a web demo of your NER model and share that with the rest of your team.

\n\n

Here is an example of a demo that you'll be able to build:

\n\n

\n\n

This tutorial will show how to take a pretrained NER model and deploy it with a Gradio interface. We will show two different ways to use the HighlightedText component -- depending on your NER model, either of these two ways may be easier to learn!

\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed. You will also need a pretrained named-entity recognition model. You can use your own; in this tutorial, we will use one from the transformers library.

\n\n

Approach 1: List of Entity Dictionaries

\n\n

Many named-entity recognition models output a list of dictionaries. Each dictionary consists of an entity, a \"start\" index, and an \"end\" index. This is, for example, how NER models in the transformers library operate:

\n\n
from transformers import pipeline \nner_pipeline = pipeline(\"ner\")\nner_pipeline(\"Does Chicago have any Pakistani restaurants\")\n
\n\n

Output:

\n\n
[{'entity': 'I-LOC',\n  'score': 0.9988978,\n  'index': 2,\n  'word': 'Chicago',\n  'start': 5,\n  'end': 12},\n {'entity': 'I-MISC',\n  'score': 0.9958592,\n  'index': 5,\n  'word': 'Pakistani',\n  'start': 22,\n  'end': 31}]\n
\n\n

If you have such a model, it is very easy to hook it up to Gradio's HighlightedText component. All you need to do is pass in this list of entities, along with the original text, together as a dictionary with the keys \"entities\" and \"text\" respectively.

\n\n

Here is a complete example:

\n\n
from transformers import pipeline\n\nimport gradio as gr\n\nner_pipeline = pipeline(\"ner\")\n\nexamples = [\n    \"Does Chicago have any stores and does Joe live here?\",\n]\n\ndef ner(text):\n    output = ner_pipeline(text)\n    return {\"text\": text, \"entities\": output}    \n\ndemo = gr.Interface(ner,\n             gr.Textbox(placeholder=\"Enter sentence here...\"), \n             gr.HighlightedText(),\n             examples=examples)\n\ndemo.launch()\n\n
\n\n

\n\n

Approach 2: List of Tuples

\n\n

An alternative way to pass data into the HighlightedText component is a list of tuples. The first element of each tuple should be the word or words that are being classified into a particular entity. The second element should be the entity label (or None if they should be unlabeled). The HighlightedText component automatically strings together the words and labels to display the entities.
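As a minimal sketch of this format (not taken from the demo below; the tuples are hard-coded purely for illustration), a function can return the list of (text, label) tuples directly to a HighlightedText output, using None for spans that should stay unlabeled:

```python
import gradio as gr

def classify(text):
    # Hard-coded output for illustration only: a list of (text, label) tuples
    return [
        ("Does ", None),
        ("Chicago", "LOC"),
        (" have any ", None),
        ("Pakistani", "MISC"),
        (" restaurants?", None),
    ]

demo = gr.Interface(classify, gr.Textbox(), gr.HighlightedText())
demo.launch()
```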

\n\n

In some cases, this can be easier than the first approach. Here is a demo showing this approach using Spacy's parts-of-speech tagger:

\n\n
import gradio as gr\nimport os\nos.system('python -m spacy download en_core_web_sm')\nimport spacy\nfrom spacy import displacy\n\nnlp = spacy.load(\"en_core_web_sm\")\n\ndef text_analysis(text):\n    doc = nlp(text)\n    html = displacy.render(doc, style=\"dep\", page=True)\n    html = (\n        \"<div style='max-width:100%; max-height:360px; overflow:auto'>\"\n        + html\n        + \"</div>\"\n    )\n    pos_count = {\n        \"char_count\": len(text),\n        \"token_count\": 0,\n    }\n    pos_tokens = []\n\n    for token in doc:\n        pos_tokens.extend([(token.text, token.pos_), (\" \", None)])\n\n    return pos_tokens, pos_count, html\n\ndemo = gr.Interface(\n    text_analysis,\n    gr.Textbox(placeholder=\"Enter sentence here...\"),\n    [\"highlight\", \"json\", \"html\"],\n    examples=[\n        [\"What a beautiful morning for a walk!\"],\n        [\"It was the best of times, it was the worst of times.\"],\n    ],\n)\n\ndemo.launch()\n\n
\n\n

\n\n
\n\n

And you're done! That's all you need to know to build a web-based GUI for your NER model.

\n\n

Fun tip: you can share your NER demo instantly with others simply by setting share=True in launch().
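For example, a one-line sketch assuming the `demo` object defined in the example above:

```python
# Launch with a temporary public share link instead of serving only locally
demo.launch(share=True)
```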

\n", "tags": ["NER", "TEXT", "HIGHLIGHT"], "spaces": ["https://huggingface.co/spaces/rajistics/biobert_ner_demo", "https://huggingface.co/spaces/abidlabs/ner", "https://huggingface.co/spaces/rajistics/Financial_Analyst_AI"], "url": "/guides/named-entity-recognition/", "contributor": null}, {"name": "real-time-speech-recognition", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 40, "pretty_name": "Real Time Speech Recognition", "content": "# Real Time Speech Recognition \n\n\n\n\n## Introduction\n\nAutomatic speech recognition (ASR), the conversion of spoken speech to text, is a very important and thriving area of machine learning. ASR algorithms run on practically every smartphone, and are becoming increasingly embedded in professional workflows, such as digital assistants for nurses and doctors. Because ASR algorithms are designed to be used directly by customers and end users, it is important to validate that they are behaving as expected when confronted with a wide variety of speech patterns (different accents, pitches, and background audio conditions).\n\nUsing `gradio`, you can easily build a demo of your ASR model and share that with a testing team, or test it yourself by speaking through the microphone on your device.\n\nThis tutorial will show how to take a pretrained speech-to-text model and deploy it with a Gradio interface. We will start with a ***full-context*** model, in which the user speaks the entire audio before the prediction runs. Then we will adapt the demo to make it ***streaming***, meaning that the audio model will convert speech as you speak. The streaming demo that we create will look something like this (try it below or [in a new tab](https://huggingface.co/spaces/abidlabs/streaming-asr-paused)!):\n\n\n\nReal-time ASR is inherently *stateful*, meaning that the model's predictions change depending on what words the user previously spoke. So, in this tutorial, we will also cover how to use **state** with Gradio demos. \n\n### Prerequisites\n\nMake sure you have the `gradio` Python package already [installed](/getting_started). You will also need a pretrained speech recognition model. In this tutorial, we will build demos from 2 ASR libraries:\n\n* Transformers (for this, `pip install transformers` and `pip install torch`) \n* DeepSpeech (`pip install deepspeech==0.8.2`)\n\nMake sure you have at least one of these installed so that you can follow along the tutorial. You will also need `ffmpeg` [installed on your system](https://www.ffmpeg.org/download.html), if you do not already have it, to process files from the microphone.\n\nHere's how to build a real time speech recognition (ASR) app: \n\n1. [Set up the Transformers ASR Model](#1-set-up-the-transformers-asr-model)\n2. [Create a Full-Context ASR Demo with Transformers](#2-create-a-full-context-asr-demo-with-transformers) \n3. [Create a Streaming ASR Demo with Transformers](#3-create-a-streaming-asr-demo-with-transformers)\n4. [Create a Streaming ASR Demo with DeepSpeech](#4-create-a-streaming-asr-demo-with-deep-speech)\n\n\n## 1. Set up the Transformers ASR Model\n\nFirst, you will need to have an ASR model that you have either trained yourself or you will need to download a pretrained model. In this tutorial, we will start by using a pretrained ASR model from the Hugging Face model, `Wav2Vec2`. 
\n\nHere is the code to load `Wav2Vec2` from Hugging Face `transformers`.\n\n```python\nfrom transformers import pipeline\n\np = pipeline(\"automatic-speech-recognition\")\n```\n\nThat's it! By default, the automatic speech recognition model pipeline loads Facebook's `facebook/wav2vec2-base-960h` model.\n\n## 2. Create a Full-Context ASR Demo with Transformers \n\nWe will start by creating a *full-context* ASR demo, in which the user speaks the full audio before using the ASR model to run inference. This is very easy with Gradio -- we simply create a function around the `pipeline` object above.\n\nWe will use `gradio`'s built in `Audio` component, configured to take input from the user's microphone and return a filepath for the recorded audio. The output component will be a plain `Textbox`.\n\n```python\nimport gradio as gr\n\ndef transcribe(audio):\n text = p(audio)[\"text\"]\n return text\n\ngr.Interface(\n fn=transcribe, \n inputs=gr.Audio(source=\"microphone\", type=\"filepath\"), \n outputs=\"text\").launch()\n```\n\nSo what's happening here? The `transcribe` function takes a single parameter, `audio`, which is a filepath to the audio file that the user has recorded. The `pipeline` object expects a filepath and converts it to text, which is returned to the frontend and displayed in a textbox. \n\nLet's see it in action! (Record a short audio clip and then click submit, or [open in a new tab](https://huggingface.co/spaces/abidlabs/full-context-asr)):\n\n\n\n## 3. Create a Streaming ASR Demo with Transformers\n\nOk great! We've built an ASR model that works well for short audio clips. However, if you are recording longer audio clips, you probably want a *streaming* interface, one that transcribes audio as the user speaks instead of just all-at-once at the end.\n\nThe good news is that it's not too difficult to adapt the demo we just made to make it streaming, using the same `Wav2Vec2` model. \n\nThe biggest change is that we must now introduce a `state` parameter, which holds the audio that has been *transcribed so far*. This allows us to only the latest chunk of audio and simply append it to the audio we previously transcribed. \n\nWhen adding state to a Gradio demo, you need to do a total of 3 things:\n\n* Add a `state` parameter to the function\n* Return the updated `state` at the end of the function\n* Add the `\"state\"` components to the `inputs` and `outputs` in `Interface` \n\nHere's what the code looks like:\n\n```python\ndef transcribe(audio, state=\"\"):\n text = p(audio)[\"text\"]\n state += text + \" \"\n return state, state\n\n# Set the starting state to an empty string\n\ngr.Interface(\n fn=transcribe, \n inputs=[\n gr.Audio(source=\"microphone\", type=\"filepath\", streaming=True), \n \"state\" \n ],\n outputs=[\n \"textbox\",\n \"state\"\n ],\n live=True).launch()\n```\n\nNotice that we've also made one other change, which is that we've set `live=True`. This keeps the Gradio interface running constantly, so it automatically transcribes audio without the user having to repeatedly hit the submit button.\n\nLet's see how it does (try below or [in a new tab](https://huggingface.co/spaces/abidlabs/streaming-asr))!\n\n\n\n\nOne thing that you may notice is that the transcription quality has dropped since the chunks of audio are so small, they lack the context to properly be transcribed. A \"hacky\" fix to this is to simply increase the runtime of the `transcribe()` function so that longer audio chunks are processed. 
We can do this by adding a `time.sleep()` inside the function, as shown below (we'll see a proper fix next) \n\n```python\nfrom transformers import pipeline\nimport gradio as gr\nimport time\n\np = pipeline(\"automatic-speech-recognition\")\n\ndef transcribe(audio, state=\"\"):\n time.sleep(2)\n text = p(audio)[\"text\"]\n state += text + \" \"\n return state, state\n\ngr.Interface(\n fn=transcribe, \n inputs=[\n gr.Audio(source=\"microphone\", type=\"filepath\", streaming=True), \n \"state\"\n ],\n outputs=[\n \"textbox\",\n \"state\"\n ],\n live=True).launch()\n```\n\nTry the demo below to see the difference (or [open in a new tab](https://huggingface.co/spaces/abidlabs/streaming-asr-paused))!\n\n\n\n\n## 4. Create a Streaming ASR Demo with DeepSpeech\n\nYou're not restricted to ASR models from the `transformers` library -- you can use your own models or models from other libraries. The `DeepSpeech` library contains models that are specifically designed to handle streaming audio data. These models perform really well with streaming data as they are able to account for previous chunks of audio data when making predictions.\n\nGoing through the DeepSpeech library is beyond the scope of this Guide (check out their [excellent documentation here](https://deepspeech.readthedocs.io/en/r0.9/)), but you can use Gradio very similarly with a DeepSpeech ASR model as with a Transformers ASR model. \n\nHere's a complete example (on Linux):\n\nFirst install the DeepSpeech library and download the pretrained models from the terminal:\n\n```bash\nwget https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-models.pbmm\nwget https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-models.scorer\napt install libasound2-dev portaudio19-dev libportaudio2 libportaudiocpp0 ffmpeg\npip install deepspeech==0.8.2\n```\n\nThen, create a similar `transcribe()` function as before:\n\n```python\nfrom deepspeech import Model\nimport numpy as np\n\nmodel_file_path = \"deepspeech-0.8.2-models.pbmm\"\nlm_file_path = \"deepspeech-0.8.2-models.scorer\"\nbeam_width = 100\nlm_alpha = 0.93\nlm_beta = 1.18\n\nmodel = Model(model_file_path)\nmodel.enableExternalScorer(lm_file_path)\nmodel.setScorerAlphaBeta(lm_alpha, lm_beta)\nmodel.setBeamWidth(beam_width)\n\n\ndef reformat_freq(sr, y):\n if sr not in (\n 48000,\n 16000,\n ): # Deepspeech only supports 16k, (we convert 48k -> 16k)\n raise ValueError(\"Unsupported rate\", sr)\n if sr == 48000:\n y = (\n ((y / max(np.max(y), 1)) * 32767)\n .reshape((-1, 3))\n .mean(axis=1)\n .astype(\"int16\")\n )\n sr = 16000\n return sr, y\n\n\ndef transcribe(speech, stream):\n _, y = reformat_freq(*speech)\n if stream is None:\n stream = model.createStream()\n stream.feedAudioContent(y)\n text = stream.intermediateDecode()\n return text, stream\n\n```\n\nThen, create a Gradio Interface as before (the only difference being that the return type should be `numpy` instead of a `filepath` to be compatible with the DeepSpeech models)\n\n```python\nimport gradio as gr\n\ngr.Interface(\n fn=transcribe, \n inputs=[\n gr.Audio(source=\"microphone\", type=\"numpy\"), \n \"state\" \n ], \n outputs= [\n \"text\", \n \"state\"\n ], \n live=True).launch()\n```\n\nRunning all of this should allow you to deploy your realtime ASR model with a nice GUI. Try it out and see how well it works for you.\n\n--------------------------------------------\n\n\nAnd you're done! That's all the code you need to build a web-based GUI for your ASR model. 
\n\nFun tip: you can share your ASR model instantly with others simply by setting `share=True` in `launch()`. \n\n\n", "html": "

Real Time Speech Recognition

\n\n

Introduction

\n\n

Automatic speech recognition (ASR), the conversion of spoken speech to text, is a very important and thriving area of machine learning. ASR algorithms run on practically every smartphone, and are becoming increasingly embedded in professional workflows, such as digital assistants for nurses and doctors. Because ASR algorithms are designed to be used directly by customers and end users, it is important to validate that they are behaving as expected when confronted with a wide variety of speech patterns (different accents, pitches, and background audio conditions).

\n\n

Using gradio, you can easily build a demo of your ASR model and share that with a testing team, or test it yourself by speaking through the microphone on your device.

\n\n

This tutorial will show how to take a pretrained speech-to-text model and deploy it with a Gradio interface. We will start with a full-context model, in which the user speaks the entire audio before the prediction runs. Then we will adapt the demo to make it streaming, meaning that the audio model will convert speech as you speak. The streaming demo that we create will look something like this (try it below or in a new tab!):

\n\n\n\n

Real-time ASR is inherently stateful, meaning that the model's predictions change depending on what words the user previously spoke. So, in this tutorial, we will also cover how to use state with Gradio demos.

\n\n

Prerequisites

\n\n

Make sure you have the gradio Python package already installed. You will also need a pretrained speech recognition model. In this tutorial, we will build demos from 2 ASR libraries:

\n\n
    \n
  • Transformers (for this, pip install transformers and pip install torch)
  • \n
  • DeepSpeech (pip install deepspeech==0.8.2)
  • \n
\n\n

Make sure you have at least one of these installed so that you can follow along the tutorial. You will also need ffmpeg installed on your system, if you do not already have it, to process files from the microphone.

\n\n

Here's how to build a real time speech recognition (ASR) app:

\n\n
    \n
  1. Set up the Transformers ASR Model
  2. \n
  3. Create a Full-Context ASR Demo with Transformers
  4. \n
  5. Create a Streaming ASR Demo with Transformers
  6. \n
  7. Create a Streaming ASR Demo with DeepSpeech
  8. \n
\n\n

1. Set up the Transformers ASR Model

\n\n

First, you will need an ASR model that you have either trained yourself or downloaded as a pretrained model. In this tutorial, we will start by using a pretrained ASR model from the Hugging Face Model Hub, Wav2Vec2.

\n\n

Here is the code to load Wav2Vec2 from Hugging Face transformers.

\n\n
from transformers import pipeline\n\np = pipeline(\"automatic-speech-recognition\")\n
\n\n

That's it! By default, the automatic speech recognition model pipeline loads Facebook's facebook/wav2vec2-base-960h model.
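If you prefer to pin the checkpoint explicitly rather than rely on the pipeline default, you can pass the model id yourself. A sketch (the id below is simply the default checkpoint mentioned above):

```python
from transformers import pipeline

# Load the same checkpoint explicitly instead of relying on the pipeline default
p = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")
```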

\n\n

2. Create a Full-Context ASR Demo with Transformers

\n\n

We will start by creating a full-context ASR demo, in which the user speaks the full audio before using the ASR model to run inference. This is very easy with Gradio -- we simply create a function around the pipeline object above.

\n\n

We will use gradio's built-in Audio component, configured to take input from the user's microphone and return a filepath for the recorded audio. The output component will be a plain Textbox.

\n\n
import gradio as gr\n\ndef transcribe(audio):\n    text = p(audio)[\"text\"]\n    return text\n\ngr.Interface(\n    fn=transcribe, \n    inputs=gr.Audio(source=\"microphone\", type=\"filepath\"), \n    outputs=\"text\").launch()\n
\n\n

So what's happening here? The transcribe function takes a single parameter, audio, which is a filepath to the audio file that the user has recorded. The pipeline object expects a filepath and converts it to text, which is returned to the frontend and displayed in a textbox.

\n\n

Let's see it in action! (Record a short audio clip and then click submit, or open in a new tab):

\n\n\n\n

3. Create a Streaming ASR Demo with Transformers

\n\n

Ok great! We've built an ASR model that works well for short audio clips. However, if you are recording longer audio clips, you probably want a streaming interface, one that transcribes audio as the user speaks instead of just all-at-once at the end.

\n\n

The good news is that it's not too difficult to adapt the demo we just made to make it streaming, using the same Wav2Vec2 model.

\n\n

The biggest change is that we must now introduce a state parameter, which holds the text that has been transcribed so far. This allows us to process only the latest chunk of audio and simply append its transcription to the text we previously transcribed.

\n\n

When adding state to a Gradio demo, you need to do a total of 3 things:

\n\n
    \n
  • Add a state parameter to the function
  • \n
  • Return the updated state at the end of the function
  • \n
  • Add the \"state\" components to the inputs and outputs in Interface
  • \n
\n\n

Here's what the code looks like:

\n\n
def transcribe(audio, state=\"\"):\n    text = p(audio)[\"text\"]\n    state += text + \" \"\n    return state, state\n\n# Set the starting state to an empty string\n\ngr.Interface(\n    fn=transcribe, \n    inputs=[\n        gr.Audio(source=\"microphone\", type=\"filepath\", streaming=True), \n        \"state\" \n    ],\n    outputs=[\n        \"textbox\",\n        \"state\"\n    ],\n    live=True).launch()\n
\n\n

Notice that we've also made one other change, which is that we've set live=True. This keeps the Gradio interface running constantly, so it automatically transcribes audio without the user having to repeatedly hit the submit button.

\n\n

Let's see how it does (try below or in a new tab)!

\n\n\n\n

One thing that you may notice is that the transcription quality has dropped: because the chunks of audio are so small, they lack the context to be transcribed properly. A \"hacky\" fix for this is to simply increase the runtime of the transcribe() function so that longer audio chunks are processed. We can do this by adding a time.sleep() inside the function, as shown below (we'll see a proper fix next).

\n\n
from transformers import pipeline\nimport gradio as gr\nimport time\n\np = pipeline(\"automatic-speech-recognition\")\n\ndef transcribe(audio, state=\"\"):\n    time.sleep(2)\n    text = p(audio)[\"text\"]\n    state += text + \" \"\n    return state, state\n\ngr.Interface(\n    fn=transcribe, \n    inputs=[\n        gr.Audio(source=\"microphone\", type=\"filepath\", streaming=True), \n        \"state\"\n    ],\n    outputs=[\n        \"textbox\",\n        \"state\"\n    ],\n    live=True).launch()\n
\n\n

Try the demo below to see the difference (or open in a new tab)!

\n\n\n\n

4. Create a Streaming ASR Demo with DeepSpeech

\n\n

You're not restricted to ASR models from the transformers library -- you can use your own models or models from other libraries. The DeepSpeech library contains models that are specifically designed to handle streaming audio data. These models perform really well with streaming data as they are able to account for previous chunks of audio data when making predictions.

\n\n

Going through the DeepSpeech library is beyond the scope of this Guide (check out their excellent documentation here), but you can use Gradio very similarly with a DeepSpeech ASR model as with a Transformers ASR model.

\n\n

Here's a complete example (on Linux):

\n\n

First install the DeepSpeech library and download the pretrained models from the terminal:

\n\n
wget https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-models.pbmm\nwget https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-models.scorer\napt install libasound2-dev portaudio19-dev libportaudio2 libportaudiocpp0 ffmpeg\npip install deepspeech==0.8.2\n
\n\n

Then, create a similar transcribe() function as before:

\n\n
from deepspeech import Model\nimport numpy as np\n\nmodel_file_path = \"deepspeech-0.8.2-models.pbmm\"\nlm_file_path = \"deepspeech-0.8.2-models.scorer\"\nbeam_width = 100\nlm_alpha = 0.93\nlm_beta = 1.18\n\nmodel = Model(model_file_path)\nmodel.enableExternalScorer(lm_file_path)\nmodel.setScorerAlphaBeta(lm_alpha, lm_beta)\nmodel.setBeamWidth(beam_width)\n\n\ndef reformat_freq(sr, y):\n    if sr not in (\n        48000,\n        16000,\n    ):  # Deepspeech only supports 16k, (we convert 48k -> 16k)\n        raise ValueError(\"Unsupported rate\", sr)\n    if sr == 48000:\n        y = (\n            ((y / max(np.max(y), 1)) * 32767)\n            .reshape((-1, 3))\n            .mean(axis=1)\n            .astype(\"int16\")\n        )\n        sr = 16000\n    return sr, y\n\n\ndef transcribe(speech, stream):\n    _, y = reformat_freq(*speech)\n    if stream is None:\n        stream = model.createStream()\n    stream.feedAudioContent(y)\n    text = stream.intermediateDecode()\n    return text, stream\n\n
\n\n

Then, create a Gradio Interface as before (the only difference being that the return type should be numpy instead of a filepath to be compatible with the DeepSpeech models)

\n\n
import gradio as gr\n\ngr.Interface(\n    fn=transcribe, \n    inputs=[\n        gr.Audio(source=\"microphone\", type=\"numpy\"), \n        \"state\" \n    ], \n    outputs= [\n        \"text\", \n        \"state\"\n    ], \n    live=True).launch()\n
\n\n

Running all of this should allow you to deploy your realtime ASR model with a nice GUI. Try it out and see how well it works for you.

\n\n
\n\n

And you're done! That's all the code you need to build a web-based GUI for your ASR model.

\n\n

Fun tip: you can share your ASR model instantly with others simply by setting share=True in launch().

\n", "tags": ["ASR", "SPEECH", "STREAMING"], "spaces": ["https://huggingface.co/spaces/abidlabs/streaming-asr-paused", "https://huggingface.co/spaces/abidlabs/full-context-asr"], "url": "/guides/real-time-speech-recognition/", "contributor": null}, {"name": "running-background-tasks", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 41, "pretty_name": "Running Background Tasks", "content": "# Running Background Tasks \n\n\n\n\n## Introduction\n\nThis guide explains how you can run background tasks from your gradio app.\nBackground tasks are operations that you'd like to perform outside the request-response\nlifecycle of your app either once or on a periodic schedule.\nExamples of background tasks include periodically synchronizing data to an external database or \nsending a report of model predictions via email.\n\n## Overview \n \nWe will be creating a simple \"Google-forms-style\" application to gather feedback from users of the gradio library.\nWe will use a local sqlite database to store our data, but we will periodically synchronize the state of the database\nwith a [HuggingFace Dataset](https://huggingface.co/datasets) so that our user reviews are always backed up.\nThe synchronization will happen in a background task running every 60 seconds.\n\nAt the end of the demo, you'll have a fully working application like this one:\n\n \n\n\n## Step 1 - Write your database logic \ud83d\udcbe\nOur application will store the name of the reviewer, their rating of gradio on a scale of 1 to 5, as well as\nany comments they want to share about the library. Let's write some code that creates a database table to\nstore this data. We'll also write some functions to insert a review into that table and fetch the latest 10 reviews.\n\nWe're going to use the `sqlite3` library to connect to our sqlite database but gradio will work with any library.\n\nThe code will look like this:\n\n```python\nDB_FILE = \"./reviews.db\"\ndb = sqlite3.connect(DB_FILE)\n\n# Create table if it doesn't already exist\ntry:\n db.execute(\"SELECT * FROM reviews\").fetchall()\n db.close()\nexcept sqlite3.OperationalError:\n db.execute(\n '''\n CREATE TABLE reviews (id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,\n name TEXT, review INTEGER, comments TEXT)\n ''')\n db.commit()\n db.close()\n\ndef get_latest_reviews(db: sqlite3.Connection):\n reviews = db.execute(\"SELECT * FROM reviews ORDER BY id DESC limit 10\").fetchall()\n total_reviews = db.execute(\"Select COUNT(id) from reviews\").fetchone()[0]\n reviews = pd.DataFrame(reviews, columns=[\"id\", \"date_created\", \"name\", \"review\", \"comments\"])\n return reviews, total_reviews\n\n\ndef add_review(name: str, review: int, comments: str):\n db = sqlite3.connect(DB_FILE)\n cursor = db.cursor()\n cursor.execute(\"INSERT INTO reviews(name, review, comments) VALUES(?,?,?)\", [name, review, comments])\n db.commit()\n reviews, total_reviews = get_latest_reviews(db)\n db.close()\n return reviews, total_reviews\n```\n\nLet's also write a function to load the latest reviews when the gradio application loads:\n```python\ndef load_data():\n db = sqlite3.connect(DB_FILE)\n reviews, total_reviews = get_latest_reviews(db)\n db.close()\n return reviews, total_reviews\n```\n\n## Step 2 - Create a gradio app \u26a1\nNow that we have our database logic defined, we can use gradio create a dynamic web page to ask our users for feedback! 
\n\n```python\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n name = gr.Textbox(label=\"Name\", placeholder=\"What is your name?\")\n review = gr.Radio(label=\"How satisfied are you with using gradio?\", choices=[1, 2, 3, 4, 5])\n comments = gr.Textbox(label=\"Comments\", lines=10, placeholder=\"Do you have any feedback on gradio?\")\n submit = gr.Button(value=\"Submit Feedback\")\n with gr.Column():\n data = gr.Dataframe(label=\"Most recently created 10 rows\")\n count = gr.Number(label=\"Total number of reviews\")\n submit.click(add_review, [name, review, comments], [data, count])\n demo.load(load_data, None, [data, count])\n```\n\n## Step 3 - Synchronize with HuggingFace Datasets \ud83e\udd17\n\nWe could call `demo.launch()` after step 2 and have a fully functioning application. However,\nour data would be stored locally on our machine. If the sqlite file were accidentally deleted, we'd lose all of our reviews!\nLet's back up our data to a dataset on the HuggingFace hub.\n\nCreate a dataset [here](https://huggingface.co/datasets) before proceeding.\n\nNow at the **top** of our script, we'll use the [huggingface hub client library](https://huggingface.co/docs/huggingface_hub/index)\nto connect to our dataset and pull the latest backup.\n\n```python\nTOKEN = os.environ.get('HUB_TOKEN')\nrepo = huggingface_hub.Repository(\n local_dir=\"data\",\n repo_type=\"dataset\",\n clone_from=\"\",\n use_auth_token=TOKEN\n)\nrepo.git_pull()\n\nshutil.copyfile(\"./data/reviews.db\", DB_FILE)\n```\n\nNote that you'll have to get an access token from the \"Settings\" tab of your HuggingFace for the above code to work.\nIn the script, the token is securely accessed via an environment variable.\n\n![access_token](https://github.com/gradio-app/gradio/blob/main/guides/assets/access_token.png?raw=true)\n\nNow we will create a background task to synch our local database to the dataset hub every 60 seconds.\nWe will use the [AdvancedPythonScheduler](https://apscheduler.readthedocs.io/en/3.x/) to handle the scheduling.\nHowever, this is not the only task scheduling library available. Feel free to use whatever you are comfortable with.\n\nThe function to back up our data will look like this:\n\n```python\nfrom apscheduler.schedulers.background import BackgroundScheduler\n\ndef backup_db():\n shutil.copyfile(DB_FILE, \"./data/reviews.db\")\n db = sqlite3.connect(DB_FILE)\n reviews = db.execute(\"SELECT * FROM reviews\").fetchall()\n pd.DataFrame(reviews).to_csv(\"./data/reviews.csv\", index=False)\n print(\"updating db\")\n repo.push_to_hub(blocking=False, commit_message=f\"Updating data at {datetime.datetime.now()}\")\n\n\nscheduler = BackgroundScheduler()\nscheduler.add_job(func=backup_db, trigger=\"interval\", seconds=60)\nscheduler.start()\n```\n\n\n## Step 4 (Bonus) - Deployment to HuggingFace Spaces\nYou can use the HuggingFace [Spaces](https://huggingface.co/spaces) platform to deploy this application for free \u2728\n\nIf you haven't used Spaces before, follow the previous guide [here](/using_hugging_face_integrations).\nYou will have to use the `HUB_TOKEN` environment variable as a secret in the Guides.\n\n## Conclusion\nCongratulations! You know how to run background tasks from your gradio app on a schedule \u23f2\ufe0f. \n\nCheckout the application running on Spaces [here](https://huggingface.co/spaces/freddyaboulton/gradio-google-forms).\nThe complete code is [here](https://huggingface.co/spaces/freddyaboulton/gradio-google-forms/blob/main/app.py)", "html": "

Running Background Tasks

\n\n

Introduction

\n\n

This guide explains how you can run background tasks from your gradio app.\nBackground tasks are operations that you'd like to perform outside the request-response\nlifecycle of your app either once or on a periodic schedule.\nExamples of background tasks include periodically synchronizing data to an external database or \nsending a report of model predictions via email.

\n\n

Overview

\n\n

We will be creating a simple \"Google-forms-style\" application to gather feedback from users of the gradio library.\nWe will use a local sqlite database to store our data, but we will periodically synchronize the state of the database\nwith a HuggingFace Dataset so that our user reviews are always backed up.\nThe synchronization will happen in a background task running every 60 seconds.

\n\n

At the end of the demo, you'll have a fully working application like this one:

\n\n

\n\n

Step 1 - Write your database logic \ud83d\udcbe

\n\n

Our application will store the name of the reviewer, their rating of gradio on a scale of 1 to 5, as well as\nany comments they want to share about the library. Let's write some code that creates a database table to\nstore this data. We'll also write some functions to insert a review into that table and fetch the latest 10 reviews.

\n\n

We're going to use the sqlite3 library to connect to our sqlite database but gradio will work with any library.

\n\n

The code will look like this:

\n\n
DB_FILE = \"./reviews.db\"\ndb = sqlite3.connect(DB_FILE)\n\n# Create table if it doesn't already exist\ntry:\n    db.execute(\"SELECT * FROM reviews\").fetchall()\n    db.close()\nexcept sqlite3.OperationalError:\n    db.execute(\n        '''\n        CREATE TABLE reviews (id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n                              created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,\n                              name TEXT, review INTEGER, comments TEXT)\n        ''')\n    db.commit()\n    db.close()\n\ndef get_latest_reviews(db: sqlite3.Connection):\n    reviews = db.execute(\"SELECT * FROM reviews ORDER BY id DESC limit 10\").fetchall()\n    total_reviews = db.execute(\"Select COUNT(id) from reviews\").fetchone()[0]\n    reviews = pd.DataFrame(reviews, columns=[\"id\", \"date_created\", \"name\", \"review\", \"comments\"])\n    return reviews, total_reviews\n\n\ndef add_review(name: str, review: int, comments: str):\n    db = sqlite3.connect(DB_FILE)\n    cursor = db.cursor()\n    cursor.execute(\"INSERT INTO reviews(name, review, comments) VALUES(?,?,?)\", [name, review, comments])\n    db.commit()\n    reviews, total_reviews = get_latest_reviews(db)\n    db.close()\n    return reviews, total_reviews\n
\n\n
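As a quick sanity check (a sketch, not part of the guide: it assumes the snippet above has already run so that reviews.db and its table exist, and that sqlite3 and pandas are imported, since the helpers rely on both):

```python
import sqlite3
import pandas as pd

# Insert one hypothetical review and inspect the running total plus the latest rows
reviews_df, total = add_review("Alice", 5, "Great library!")
print(total)
print(reviews_df.head())
```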

Let's also write a function to load the latest reviews when the gradio application loads:

\n\n
def load_data():\n    db = sqlite3.connect(DB_FILE)\n    reviews, total_reviews = get_latest_reviews(db)\n    db.close()\n    return reviews, total_reviews\n
\n\n

Step 2 - Create a gradio app \u26a1

\n\n

Now that we have our database logic defined, we can use gradio to create a dynamic web page to ask our users for feedback!

\n\n
with gr.Blocks() as demo:\n    with gr.Row():\n        with gr.Column():\n            name = gr.Textbox(label=\"Name\", placeholder=\"What is your name?\")\n            review = gr.Radio(label=\"How satisfied are you with using gradio?\", choices=[1, 2, 3, 4, 5])\n            comments = gr.Textbox(label=\"Comments\", lines=10, placeholder=\"Do you have any feedback on gradio?\")\n            submit = gr.Button(value=\"Submit Feedback\")\n        with gr.Column():\n            data = gr.Dataframe(label=\"Most recently created 10 rows\")\n            count = gr.Number(label=\"Total number of reviews\")\n    submit.click(add_review, [name, review, comments], [data, count])\n    demo.load(load_data, None, [data, count])\n
\n\n

Step 3 - Synchronize with HuggingFace Datasets \ud83e\udd17

\n\n

We could call demo.launch() after step 2 and have a fully functioning application. However,\nour data would be stored locally on our machine. If the sqlite file were accidentally deleted, we'd lose all of our reviews!\nLet's back up our data to a dataset on the HuggingFace hub.

\n\n

Create a dataset here before proceeding.

\n\n

Now at the top of our script, we'll use the huggingface hub client library\nto connect to our dataset and pull the latest backup.

\n\n
TOKEN = os.environ.get('HUB_TOKEN')\nrepo = huggingface_hub.Repository(\n    local_dir=\"data\",\n    repo_type=\"dataset\",\n    clone_from=\"\",\n    use_auth_token=TOKEN\n)\nrepo.git_pull()\n\nshutil.copyfile(\"./data/reviews.db\", DB_FILE)\n
\n\n

Note that you'll have to get an access token from the \"Settings\" tab of your HuggingFace account for the above code to work.\nIn the script, the token is securely accessed via an environment variable.

\n\n

\"access_token\"

\n\n

Now we will create a background task to sync our local database to the dataset hub every 60 seconds.\nWe will use the Advanced Python Scheduler (APScheduler) to handle the scheduling.\nHowever, this is not the only task scheduling library available. Feel free to use whatever you are comfortable with.

\n\n

The function to back up our data will look like this:

\n\n
from apscheduler.schedulers.background import BackgroundScheduler\n\ndef backup_db():\n    shutil.copyfile(DB_FILE, \"./data/reviews.db\")\n    db = sqlite3.connect(DB_FILE)\n    reviews = db.execute(\"SELECT * FROM reviews\").fetchall()\n    pd.DataFrame(reviews).to_csv(\"./data/reviews.csv\", index=False)\n    print(\"updating db\")\n    repo.push_to_hub(blocking=False, commit_message=f\"Updating data at {datetime.datetime.now()}\")\n\n\nscheduler = BackgroundScheduler()\nscheduler.add_job(func=backup_db, trigger=\"interval\", seconds=60)\nscheduler.start()\n
\n\n

Step 4 (Bonus) - Deployment to HuggingFace Spaces

\n\n

You can use the HuggingFace Spaces platform to deploy this application for free \u2728

\n\n

If you haven't used Spaces before, follow the previous guide here.\nYou will have to add the HUB_TOKEN environment variable as a secret in your Space's settings.

\n\n

Conclusion

\n\n

Congratulations! You know how to run background tasks from your gradio app on a schedule \u23f2\ufe0f.

\n\n

Check out the application running on Spaces here.\nThe complete code is here.

\n", "tags": ["TASKS", "SCHEDULED", "TABULAR", "DATA "], "spaces": ["https://huggingface.co/spaces/freddyaboulton/gradio-google-forms"], "url": "/guides/running-background-tasks/", "contributor": null}, {"name": "running-gradio-on-your-web-server-with-nginx", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 42, "pretty_name": "Running Gradio On Your Web Server With Nginx", "content": "# Running a Gradio App on your Web Server with Nginx\n\n\n\n## Introduction\n\nGradio is a Python library that allows you to quickly create customizable web apps for your machine learning models and data processing pipelines. Gradio apps can be deployed on [Hugging Face Spaces](https://hf.space) for free. \n\nIn some cases though, you might want to deploy a Gradio app on your own web server. You might already be using [Nginx](https://www.nginx.com/), a highly performant web server, to serve your website (say `https://www.example.com`), and you want to attach Gradio to a specific subpath on your website (e.g. `https://www.example.com/gradio-demo`). \n\nIn this Guide, we will guide you through the process of running a Gradio app behind Nginx on your own web server to achieve this.\n\n**Prerequisites**\n\n1. A Linux web server with [Nginx installed](https://www.nginx.com/blog/setting-up-nginx/) and [Gradio installed](/quickstart) \n \n2. A working Gradio app saved as a python file on your web server\n\n## Editing your Nginx configuration file\n\n1. Start by editing the Nginx configuration file on your web server. By default, this is located at: `/etc/nginx/nginx.conf`\n\nIn the `http` block, add the following line to include server block configurations from a separate file:\n\n```bash\ninclude /etc/nginx/sites-enabled/*;\n```\n\n2. Create a new file in the `/etc/nginx/sites-available` directory (create the directory if it does not already exist), using a filename that represents your app, for example: `sudo nano /etc/nginx/sites-available/my_gradio_app`\n\n3. Paste the following into your file editor:\n\n```bash\nserver {\n listen 80;\n server_name example.com www.example.com; # Change this to your domain name \n\n location /gradio-demo/ { # Change this if you'd like to server your Gradio app on a different path\n proxy_pass http://127.0.0.1:7860/; # Change this if your Gradio app will be running on a different port\n proxy_redirect off;\n proxy_http_version 1.1;\n proxy_set_header Upgrade $http_upgrade;\n proxy_set_header Connection \"upgrade\";\n proxy_set_header Host $host;\n }\n}\n```\n\n## Run your Gradio app on your web server\n\n1. Before you launch your Gradio app, you'll need to set the `root_path` to be the same as the subpath that you specified in your nginx configuration. This is necessary for Gradio to run on any subpath besides the root of the domain.\n\nHere's a simple example of a Gradio app with a custom `root_path`:\n\n```python\nimport gradio as gr\nimport time\n\ndef test(x):\ntime.sleep(4)\nreturn x\n\ngr.Interface(test, \"textbox\", \"textbox\").queue().launch(root_path=\"/gradio-demo\")\n```\n\n2. Start a `tmux` session by typing `tmux` and pressing enter (optional) \n\nIt's recommended that you run your Gradio app in a `tmux` session so that you can keep it running in the background easily\n\n3. Then, start your Gradio app. Simply type in `python` followed by the name of your Gradio python file. 
By default, your app will run on `localhost:7860`, but if it starts on a different port, you will need to update the nginx configuration file above.\n\n## Restart Nginx\n\n1. If you are in a tmux session, exit by typing CTRL+B (or CMD+B), followed by the \"D\" key.\n\n2. Finally, restart nginx by running `sudo systemctl restart nginx`. \n\nAnd that's it! If you visit `https://example.com/gradio-demo` on your browser, you should see your Gradio app running there\n\n", "html": "

Running a Gradio App on your Web Server with Nginx

\n\n

Introduction

\n\n

Gradio is a Python library that allows you to quickly create customizable web apps for your machine learning models and data processing pipelines. Gradio apps can be deployed on Hugging Face Spaces for free.

\n\n

In some cases though, you might want to deploy a Gradio app on your own web server. You might already be using Nginx, a highly performant web server, to serve your website (say https://www.example.com), and you want to attach Gradio to a specific subpath on your website (e.g. https://www.example.com/gradio-demo).

\n\n

In this Guide, we will walk you through the process of running a Gradio app behind Nginx on your own web server to achieve this.

\n\n

Prerequisites

\n\n
    \n
  1. A Linux web server with Nginx installed and Gradio installed

  2. \n
  3. A working Gradio app saved as a python file on your web server

  4. \n
\n\n

Editing your Nginx configuration file

\n\n
    \n
  1. Start by editing the Nginx configuration file on your web server. By default, this is located at: /etc/nginx/nginx.conf
  2. \n
\n\n

In the http block, add the following line to include server block configurations from a separate file:

\n\n
include /etc/nginx/sites-enabled/*;\n
\n\n
    \n
  1. Create a new file in the /etc/nginx/sites-available directory (create the directory if it does not already exist), using a filename that represents your app, for example: sudo nano /etc/nginx/sites-available/my_gradio_app

  2. \n
  3. Paste the following into your file editor:

  4. \n
\n\n
server {\n    listen 80;\n    server_name example.com www.example.com;  # Change this to your domain name \n\n    location /gradio-demo/ {  # Change this if you'd like to serve your Gradio app on a different path\n        proxy_pass http://127.0.0.1:7860/; # Change this if your Gradio app will be running on a different port\n        proxy_redirect off;\n        proxy_http_version 1.1;\n        proxy_set_header Upgrade $http_upgrade;\n        proxy_set_header Connection \"upgrade\";\n        proxy_set_header Host $host;\n    }\n}\n
\n\n

Run your Gradio app on your web server

\n\n
    \n
  1. Before you launch your Gradio app, you'll need to set the root_path to be the same as the subpath that you specified in your nginx configuration. This is necessary for Gradio to run on any subpath besides the root of the domain.
  2. \n
\n\n

Here's a simple example of a Gradio app with a custom root_path:

\n\n
import gradio as gr\nimport time\n\ndef test(x):\n    time.sleep(4)\n    return x\n\ngr.Interface(test, \"textbox\", \"textbox\").queue().launch(root_path=\"/gradio-demo\")\n
\n\n
    \n
  1. Start a tmux session by typing tmux and pressing enter (optional)
  2. \n
\n\n

It's recommended that you run your Gradio app in a tmux session so that you can keep it running in the background easily.

\n\n
    \n
  1. Then, start your Gradio app. Simply type in python followed by the name of your Gradio python file. By default, your app will run on localhost:7860, but if it starts on a different port, you will need to update the nginx configuration file above.
  2. \n
\n\n

Restart Nginx

\n\n
    \n
  1. If you are in a tmux session, exit by typing CTRL+B (or CMD+B), followed by the \"D\" key.

  2. \n
  3. Finally, restart nginx by running sudo systemctl restart nginx.

  4. \n
\n\n

And that's it! If you visit https://example.com/gradio-demo in your browser, you should see your Gradio app running there.

\n", "tags": ["DEPLOYMENT", "WEB SERVER", "NGINX"], "spaces": [], "url": "/guides/running-gradio-on-your-web-server-with-nginx/", "contributor": null}, {"name": "setting-up-a-demo-for-maximum-performance", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 43, "pretty_name": "Setting Up A Demo For Maximum Performance", "content": "# Setting Up a Demo for Maximum Performance\n\n\n\n\nLet's say that your Gradio demo goes *viral* on social media -- you have lots of users trying it out simultaneously, and you want to provide your users with the best possible experience or, in other words, minimize the amount of time that each user has to wait in the queue to see their prediction.\n\nHow can you configure your Gradio demo to handle the most traffic? In this Guide, we dive into some of the parameters of Gradio's `.queue()` method as well as some other related configurations, and discuss how to set these parameters in a way that allows you to serve lots of users simultaneously with minimal latency.\n\nThis is an advanced guide, so make sure you know the basics of Gradio already, such as [how to create and launch a Gradio Interface](https://gradio.app/guides/quickstart/). Most of the information in this Guide is relevant whether you are hosting your demo on [Hugging Face Spaces](https://hf.space) or on your own server.\n\n## Enabling Gradio's Queueing System\n\nBy default, a Gradio demo does not use queueing and instead sends prediction requests via a POST request to the server where your Gradio server and Python code are running. However, regular POST requests have two big limitations:\n\n(1) They time out -- most browsers raise a timeout error\nif they do not get a response to a POST request after a short period of time (e.g. 1 min).\nThis can be a problem if your inference function takes longer than 1 minute to run or\nif many people are trying out your demo at the same time, resulting in increased latency.\n\n(2) They do not allow bi-directional communication between the Gradio demo and the Gradio server. This means, for example, that you cannot get a real-time ETA of how long your prediction will take to complete.\n\nTo address these limitations, any Gradio app can be converted to use **websockets** instead, simply by adding `.queue()` before launching an Interface or a Blocks. Here's an example:\n\n```py\napp = gr.Interface(lambda x:x, \"image\", \"image\")\napp.queue() # <-- Sets up a queue with default parameters\napp.launch()\n```\n\nIn the demo `app` above, predictions will now be sent over a websocket instead.\nUnlike POST requests, websockets do not timeout and they allow bidirectional traffic. On the Gradio server, a **queue** is set up, which adds each request that comes to a list. When a worker is free, the first available request is passed into the worker for inference. When the inference is complete, the queue sends the prediction back through the websocket to the particular Gradio user who called that prediction. \n\nNote: If you host your Gradio app on [Hugging Face Spaces](https://hf.space), the queue is already **enabled by default**. You can still call the `.queue()` method manually in order to configure the queue parameters described below.\n\n## Queuing Parameters\n\nThere are several parameters that can be used to configure the queue and help reduce latency. 
Let's go through them one-by-one.\n\n### The `concurrency_count` parameter\n\nThe first parameter we will explore is the `concurrency_count` parameter of `queue()`. This parameter is used to set the number of worker threads in the Gradio server that will be processing your requests in parallel. By default, this parameter is set to `1` but increasing this can **linearly multiply the capacity of your server to handle requests**.\n\nSo why not set this parameter much higher? Keep in mind that since requests are processed in parallel, each request will consume memory to store the data and weights for processing. This means that you might get out-of-memory errors if you increase the `concurrency_count` too high. You may also start to get diminishing returns if the `concurrency_count` is too high because of costs of switching between different worker threads.\n\n**Recommendation**: Increase the `concurrency_count` parameter as high as you can while you continue to see performance gains or until you hit memory limits on your machine. You can [read about Hugging Face Spaces machine specs here](https://huggingface.co/docs/hub/spaces-overview). \n\n*Note*: there is a second parameter which controls the *total* number of threads that Gradio can generate, whether or not queuing is enabled. This is the `max_threads` parameter in the `launch()` method. When you increase the `concurrency_count` parameter in `queue()`, this is automatically increased as well. However, in some cases, you may want to manually increase this, e.g. if queuing is not enabled. \n\n### The `max_size` parameter\n\nA more blunt way to reduce the wait times is simply to prevent too many people from joining the queue in the first place. You can set the maximum number of requests that the queue processes using the `max_size` parameter of `queue()`. If a request arrives when the queue is already of the maximum size, it will not be allowed to join the queue and instead, the user will receive an error saying that the queue is full and to try again. By default, `max_size=None`, meaning that there is no limit to the number of users that can join the queue.\n\nParadoxically, setting a `max_size` can often improve user experience because it prevents users from being dissuaded by very long queue wait times. Users who are more interested and invested in your demo will keep trying to join the queue, and will be able to get their results faster. \n\n**Recommendation**: For a better user experience, set a `max_size` that is reasonable given your expectations of how long users might be willing to wait for a prediction. \n\n### The `max_batch_size` parameter\n\nAnother way to increase the parallelism of your Gradio demo is to write your function so that it can accept **batches** of inputs. Most deep learning models can process batches of samples more efficiently than processing individual samples. \n\nIf you write your function to process a batch of samples, Gradio will automatically batch incoming requests together and pass them into your function as a batch of samples. You need to set `batch` to `True` (by default it is `False`) and set a `max_batch_size` (by default it is `4`) based on the maximum number of samples your function is able to handle. These two parameters can be passed into `gr.Interface()` or to an event in Blocks such as `.click()`. \n\nWhile setting a batch is conceptually similar to having workers process requests in parallel, it is often *faster* than setting the `concurrency_count` for deep learning models. 
The downside is that you might need to adapt your function a little bit to accept batches of samples instead of individual samples. \n\nHere's an example of a function that does *not* accept a batch of inputs -- it processes a single input at a time:\n\n```py\nimport time\n\ndef trim_words(word, length):\n return w[:int(length)]\n\n```\n\nHere's the same function rewritten to take in a batch of samples:\n\n```py\nimport time\n\ndef trim_words(words, lengths):\n trimmed_words = []\n for w, l in zip(words, lengths):\n trimmed_words.append(w[:int(l)]) \n return [trimmed_words]\n\n```\n\nThe second function can be used with `batch=True` and an appropriate `max_batch_size` parameter.\n\n**Recommendation**: If possible, write your function to accept batches of samples, and then set `batch` to `True` and the `max_batch_size` as high as possible based on your machine's memory limits. If you set `max_batch_size` as high as possible, you will most likely need to set `concurrency_count` back to `1` since you will no longer have the memory to have multiple workers running in parallel. \n\n### The `api_open` parameter\n\nWhen creating a Gradio demo, you may want to restrict all traffic to happen through the user interface as opposed to the [programmatic API](/guides/sharing-your-app/#api-page) that is automatically created for your Gradio demo. This is important because when people make requests through the programmatic API, they can potentially bypass users who are waiting in the queue and degrade the experience of these users. \n\n**Recommendation**: set the `api_open` parameter in `queue()` to `False` in your demo to prevent programmatic requests.\n\n\n\n### Upgrading your Hardware (GPUs, TPUs, etc.)\n\nIf you have done everything above, and your demo is still not fast enough, you can upgrade the hardware that your model is running on. Changing the model from running on CPUs to running on GPUs will usually provide a 10x-50x increase in inference time for deep learning models.\n\nIt is particularly straightforward to upgrade your Hardware on Hugging Face Spaces. Simply click on the \"Settings\" tab in your Space and choose the Space Hardware you'd like.\n\n![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/hub/spaces-gpu-settings.png)\n\nWhile you might need to adapt portions of your machine learning inference code to run on a GPU (here's a [handy guide](https://cnvrg.io/pytorch-cuda/) if you are using PyTorch), Gradio is completely agnostic to the choice of hardware and will work completely fine if you use it with CPUs, GPUs, TPUs, or any other hardware!\n\nNote: your GPU memory is different than your CPU memory, so if you upgrade your hardware,\nyou might need to adjust the value of the `concurrency_count` parameter described above.\n\n## Conclusion\n\nCongratulations! You know how to set up a Gradio demo for maximum performance. Good luck on your next viral demo! \n\n", "html": "

Setting Up a Demo for Maximum Performance

\n\n

Let's say that your Gradio demo goes viral on social media -- you have lots of users trying it out simultaneously, and you want to provide your users with the best possible experience or, in other words, minimize the amount of time that each user has to wait in the queue to see their prediction.

\n\n

How can you configure your Gradio demo to handle the most traffic? In this Guide, we dive into some of the parameters of Gradio's .queue() method as well as some other related configurations, and discuss how to set these parameters in a way that allows you to serve lots of users simultaneously with minimal latency.

\n\n

This is an advanced guide, so make sure you know the basics of Gradio already, such as how to create and launch a Gradio Interface. Most of the information in this Guide is relevant whether you are hosting your demo on Hugging Face Spaces or on your own server.

\n\n

Enabling Gradio's Queueing System

\n\n

By default, a Gradio demo does not use queueing and instead sends prediction requests via a POST request to the server where your Gradio server and Python code are running. However, regular POST requests have two big limitations:

\n\n

(1) They time out -- most browsers raise a timeout error\nif they do not get a response to a POST request after a short period of time (e.g. 1 min).\nThis can be a problem if your inference function takes longer than 1 minute to run or\nif many people are trying out your demo at the same time, resulting in increased latency.

\n\n

(2) They do not allow bi-directional communication between the Gradio demo and the Gradio server. This means, for example, that you cannot get a real-time ETA of how long your prediction will take to complete.

\n\n

To address these limitations, any Gradio app can be converted to use websockets instead, simply by adding .queue() before launching an Interface or a Blocks. Here's an example:

\n\n
app = gr.Interface(lambda x:x, \"image\", \"image\")\napp.queue()  # <-- Sets up a queue with default parameters\napp.launch()\n
\n\n

In the demo app above, predictions will now be sent over a websocket instead.\nUnlike POST requests, websockets do not time out and they allow bidirectional traffic. On the Gradio server, a queue is set up, which adds each incoming request to a list. When a worker is free, the first available request is passed into the worker for inference. When the inference is complete, the queue sends the prediction back through the websocket to the particular Gradio user who called that prediction.

\n\n

Note: If you host your Gradio app on Hugging Face Spaces, the queue is already enabled by default. You can still call the .queue() method manually in order to configure the queue parameters described below.

\n\n

Queuing Parameters

\n\n

There are several parameters that can be used to configure the queue and help reduce latency. Let's go through them one-by-one.

\n\n

The concurrency_count parameter

\n\n

The first parameter we will explore is the concurrency_count parameter of queue(). This parameter is used to set the number of worker threads in the Gradio server that will be processing your requests in parallel. By default, this parameter is set to 1 but increasing this can linearly multiply the capacity of your server to handle requests.

\n\n
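For illustration, here is a minimal sketch of raising the worker count when enabling the queue (the value 3 is arbitrary and should be tuned to your machine):
\n\n
app = gr.Interface(lambda x: x, \"image\", \"image\")\napp.queue(concurrency_count=3)  # <-- 3 worker threads process requests in parallel\napp.launch()\n
\n\n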

So why not set this parameter much higher? Keep in mind that since requests are processed in parallel, each request will consume memory to store the data and weights for processing. This means that you might get out-of-memory errors if you increase the concurrency_count too high. You may also start to get diminishing returns if the concurrency_count is too high because of costs of switching between different worker threads.

\n\n

Recommendation: Increase the concurrency_count parameter as high as you can while you continue to see performance gains or until you hit memory limits on your machine. You can read about Hugging Face Spaces machine specs here.

\n\n

Note: there is a second parameter which controls the total number of threads that Gradio can generate, whether or not queuing is enabled. This is the max_threads parameter in the launch() method. When you increase the concurrency_count parameter in queue(), this is automatically increased as well. However, in some cases, you may want to manually increase this, e.g. if queuing is not enabled.

\n\n
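As a sketch (the value 80 is arbitrary), you could raise this cap directly when launching the app:
\n\n
app = gr.Interface(lambda x: x, \"image\", \"image\")\napp.launch(max_threads=80)  # <-- total number of threads Gradio may create, useful when queuing is not enabled\n
\n\n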

The max_size parameter

\n\n

A more blunt way to reduce the wait times is simply to prevent too many people from joining the queue in the first place. You can set the maximum number of requests that the queue processes using the max_size parameter of queue(). If a request arrives when the queue is already of the maximum size, it will not be allowed to join the queue and instead, the user will receive an error saying that the queue is full and to try again. By default, max_size=None, meaning that there is no limit to the number of users that can join the queue.

\n\n
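For example, a minimal sketch (the cap of 20 is arbitrary) that turns new requests away once 20 are already waiting:
\n\n
app = gr.Interface(lambda x: x, \"image\", \"image\")\napp.queue(max_size=20)  # <-- users see a \"queue is full\" error once 20 requests are waiting\napp.launch()\n
\n\n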

Paradoxically, setting a max_size can often improve user experience because it prevents users from being dissuaded by very long queue wait times. Users who are more interested and invested in your demo will keep trying to join the queue, and will be able to get their results faster.

\n\n

Recommendation: For a better user experience, set a max_size that is reasonable given your expectations of how long users might be willing to wait for a prediction.

\n\n

The max_batch_size parameter

\n\n

Another way to increase the parallelism of your Gradio demo is to write your function so that it can accept batches of inputs. Most deep learning models can process batches of samples more efficiently than processing individual samples.

\n\n

If you write your function to process a batch of samples, Gradio will automatically batch incoming requests together and pass them into your function as a batch of samples. You need to set batch to True (by default it is False) and set a max_batch_size (by default it is 4) based on the maximum number of samples your function is able to handle. These two parameters can be passed into gr.Interface() or to an event in Blocks such as .click().

\n\n

While setting a batch is conceptually similar to having workers process requests in parallel, it is often faster than setting the concurrency_count for deep learning models. The downside is that you might need to adapt your function a little bit to accept batches of samples instead of individual samples.

\n\n

Here's an example of a function that does not accept a batch of inputs -- it processes a single input at a time:

\n\n
import time\n\ndef trim_words(word, length):\n    return word[:int(length)]\n\n
\n\n

Here's the same function rewritten to take in a batch of samples:

\n\n
import time\n\ndef trim_words(words, lengths):\n    trimmed_words = []\n    for w, l in zip(words, lengths):\n        trimmed_words.append(w[:int(l)])        \n    return [trimmed_words]\n\n
\n\n

The second function can be used with batch=True and an appropriate max_batch_size parameter.

\n\n
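As a sketch of the wiring (the component choices and the batch size of 16 are illustrative), the batched function above can be hooked up like this:
\n\n
demo = gr.Interface(\n    trim_words,\n    [\"textbox\", \"number\"],\n    [\"textbox\"],\n    batch=True,         # Gradio collects incoming requests into batches\n    max_batch_size=16,  # largest batch the function will receive\n)\ndemo.queue()\ndemo.launch()\n
\n\n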

Recommendation: If possible, write your function to accept batches of samples, and then set batch to True and the max_batch_size as high as possible based on your machine's memory limits. If you set max_batch_size as high as possible, you will most likely need to set concurrency_count back to 1 since you will no longer have the memory to have multiple workers running in parallel.

\n\n

The api_open parameter

\n\n

When creating a Gradio demo, you may want to restrict all traffic to happen through the user interface as opposed to the programmatic API that is automatically created for your Gradio demo. This is important because when people make requests through the programmatic API, they can potentially bypass users who are waiting in the queue and degrade the experience of these users.

\n\n

Recommendation: set the api_open parameter in queue() to False in your demo to prevent programmatic requests.

\n\n
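A minimal sketch of this setting:
\n\n
app = gr.Interface(lambda x: x, \"image\", \"image\")\napp.queue(api_open=False)  # <-- requests must go through the UI rather than the programmatic API\napp.launch()\n
\n\n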

Upgrading your Hardware (GPUs, TPUs, etc.)

\n\n

If you have done everything above, and your demo is still not fast enough, you can upgrade the hardware that your model is running on. Changing the model from running on CPUs to running on GPUs will usually provide a 10x-50x speedup in inference for deep learning models.

\n\n

It is particularly straightforward to upgrade your Hardware on Hugging Face Spaces. Simply click on the \"Settings\" tab in your Space and choose the Space Hardware you'd like.

\n\n

\"\"

\n\n

While you might need to adapt portions of your machine learning inference code to run on a GPU (here's a handy guide if you are using PyTorch), Gradio is completely agnostic to the choice of hardware and will work completely fine if you use it with CPUs, GPUs, TPUs, or any other hardware!

\n\n
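For instance, a rough sketch of the typical PyTorch change (model, tokenizer, and postprocess are placeholders for your own objects):
\n\n
import torch\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nmodel = model.to(device)  # move the weights to the GPU once, at startup\n\ndef predict(text):\n    inputs = tokenizer(text, return_tensors=\"pt\").to(device)  # move each input as well\n    with torch.no_grad():\n        outputs = model(**inputs)\n    return postprocess(outputs)\n
\n\n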

Note: your GPU memory is different than your CPU memory, so if you upgrade your hardware,\nyou might need to adjust the value of the concurrency_count parameter described above.

\n\n

Conclusion

\n\n

Congratulations! You know how to set up a Gradio demo for maximum performance. Good luck on your next viral demo!

\n", "tags": ["QUEUE", "PERFORMANCE"], "spaces": [], "url": "/guides/setting-up-a-demo-for-maximum-performance/", "contributor": null}, {"name": "theming-guide", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 44, "pretty_name": "Theming Guide", "content": "# Theming\n\n\n## Introduction\n\nGradio features a built-in theming engine that lets you customize the look and feel of your app. You can choose from a variety of themes, or create your own. To do so, pass the `theme=` kwarg to the `Blocks` or `Interface` constructor. For example:\n\n```python\nwith gr.Blocks(theme=gr.themes.Soft()) as demo:\n ...\n```\n\n
\n\n
\n\nGradio comes with a set of prebuilt themes which you can load from `gr.themes.*`. These are:\n\n* `gr.themes.Base()`\n* `gr.themes.Default()`\n* `gr.themes.Glass()`\n* `gr.themes.Monochrome()`\n* `gr.themes.Soft()`\n\nEach of these themes set values for hundreds of CSS variables. You can use prebuilt themes as a starting point for your own custom themes, or you can create your own themes from scratch. Let's take a look at each approach.\n\n## Using the Theme Builder\n\nThe easiest way to build a theme is using the Theme Builder. To launch the Theme Builder locally, run the following code:\n\n```python\nimport gradio as gr\n\ngr.themes.builder()\n```\n\n\n\nYou can use the Theme Builder running on Spaces above, though it runs much faster when you launch it locally via `gr.themes.builder()`. \n\nAs you edit the values in the Theme Builder, the app will preview updates in real time. You can download the code to generate the theme you've created so you can use it in any Gradio app.\n\nIn the rest of the guide, we will cover building themes programmatically.\n\n## Extending Themes via the Constructor\n\nAlthough each theme has hundreds of CSS variables, the values for most these variables are drawn from 8 core variables which can be set through the constructor of each prebuilt theme. Modifying these 8 arguments allows you to quickly change the look and feel of your app.\n\n### Core Colors\n\nThe first 3 constructor arguments set the colors of the theme and are `gradio.themes.Color` objects. Internally, these Color objects hold brightness values for the palette of a single hue, ranging from 50, 100, 200..., 800, 900, 950. Other CSS variables are derived from these 3 colors.\n\nThe 3 color constructor arguments are:\n\n- `primary_hue`: This is the color draws attention in your theme. In the default theme, this is set to `gradio.themes.colors.orange`.\n- `secondary_hue`: This is the color that is used for secondary elements in your theme. In the default theme, this is set to `gradio.themes.colors.blue`.\n- `neutral_hue`: This is the color that is used for text and other neutral elements in your theme. In the default theme, this is set to `gradio.themes.colors.gray`.\n\nYou could modify these values using their string shortcuts, such as\n\n```python\nwith gr.Blocks(theme=gr.themes.Default(primary_hue=\"red\", secondary_hue=\"pink\")) as demo:\n ...\n```\n\nor you could use the `Color` objects directly, like this:\n\n```python\nwith gr.Blocks(theme=gr.themes.Default(primary_hue=gr.themes.colors.red, secondary_hue=gr.themes.colors.pink)) as demo:\n ...\n```\n
\n\n
\n\nPredefined colors are:\n\n* `slate`\n* `gray`\n* `zinc`\n* `neutral`\n* `stone`\n* `red`\n* `orange`\n* `amber`\n* `yellow`\n* `lime`\n* `green`\n* `emerald`\n* `teal`\n* `cyan`\n* `sky`\n* `blue`\n* `indigo`\n* `violet`\n* `purple`\n* `fuchsia`\n* `pink`\n* `rose`\n\nYou could also create your own custom `Color` objects and pass them in.\n\n### Core Sizing\n\nThe next 3 constructor arguments set the sizing of the theme and are `gradio.themes.Size` objects. Internally, these Size objects hold pixel size values that range from `xxs` to `xxl`. Other CSS variables are derived from these 3 sizes.\n\n- `spacing_size`: This sets the padding within and spacing between elements. In the default theme, this is set to `gradio.themes.sizes.spacing_md`.\n- `radius_size`: This sets the roundedness of corners of elements. In the default theme, this is set to `gradio.themes.sizes.radius_md`.\n- `text_size`: This sets the font size of text. In the default theme, this is set to `gradio.themes.sizes.text_md`.\n\nYou could modify these values using their string shortcuts, such as\n\n```python\nwith gr.Blocks(theme=gr.themes.Default(spacing_size=\"sm\", radius_size=\"none\")) as demo:\n ...\n```\n\nor you could use the `Size` objects directly, like this:\n\n```python\nwith gr.Blocks(theme=gr.themes.Default(spacing_size=gr.themes.sizes.spacing_sm, radius_size=gr.themes.sizes.radius_none)) as demo:\n ...\n```\n
\n\n
\n\nThe predefined size objects are:\n\n* `radius_none`\n* `radius_sm`\n* `radius_md`\n* `radius_lg`\n* `spacing_sm`\n* `spacing_md`\n* `spacing_lg`\n* `text_sm`\n* `text_md`\n* `text_lg`\n\nYou could also create your own custom `Size` objects and pass them in.\n\n### Core Fonts\n\nThe final 2 constructor arguments set the fonts of the theme. You can pass a list of fonts to each of these arguments to specify fallbacks. If you provide a string, it will be loaded as a system font. If you provide a `gradio.themes.GoogleFont`, the font will be loaded from Google Fonts.\n\n- `font`: This sets the primary font of the theme. In the default theme, this is set to `gradio.themes.GoogleFont(\"Source Sans Pro\")`.\n- `font_mono`: This sets the monospace font of the theme. In the default theme, this is set to `gradio.themes.GoogleFont(\"IBM Plex Mono\")`.\n\nYou could modify these values such as the following:\n\n```python\nwith gr.Blocks(theme=gr.themes.Default(font=[gr.themes.GoogleFont(\"Inconsolata\"), \"Arial\", \"sans-serif\"])) as demo:\n ...\n```\n\n
\n\n
\n\n\n## Extending Themes via `.set()`\n\nYou can also modify the values of CSS variables after the theme has been loaded. To do so, use the `.set()` method of the theme object to get access to the CSS variables. For example:\n\n```python\ntheme = gr.themes.Default(primary_hue=\"blue\").set(\n loader_color=\"#FF0000\",\n slider_color=\"#FF0000\",\n)\n\nwith gr.Blocks(theme=theme) as demo:\n ...\n```\n\nIn the example above, we've set the `loader_color` and `slider_color` variables to `#FF0000`, despite the overall `primary_color` using the blue color palette. You can set any CSS variable that is defined in the theme in this manner. \n\nYour IDE type hinting should help you navigate these variables. Since there are so many CSS variables, let's take a look at how these variables are named and organized.\n\n### CSS Variable Naming Conventions\n\nCSS variable names can get quite long, like `button_primary_background_fill_hover_dark`! However they follow a common naming convention that makes it easy to understand what they do and to find the variable you're looking for. Separated by underscores, the variable name is made up of:\n\n1. The target element, such as `button`, `slider`, or `block`.\n2. The target element type or sub-element, such as `button_primary`, or `block_label`.\n3. The property, such as `button_primary_background_fill`, or `block_label_border_width`.\n4. Any relevant state, such as `button_primary_background_fill_hover`.\n5. If the value is different in dark mode, the suffix `_dark`. For example, `input_border_color_focus_dark`.\n\nOf course, many CSS variable names are shorter than this, such as `table_border_color`, or `input_shadow`. \n\n### CSS Variable Organization\n\nThough there are hundreds of CSS variables, they do not all have to have individual values. They draw their values by referencing a set of core variables and referencing each other. This allows us to only have to modify a few variables to change the look and feel of the entire theme, while also getting finer control of individual elements that we may want to modify.\n\n#### Referencing Core Variables\n\nTo reference one of the core constructor variables, precede the variable name with an asterisk. To reference a core color, use the `*primary_`, `*secondary_`, or `*neutral_` prefix, followed by the brightness value. For example:\n\n```python\ntheme = gr.themes.Default(primary_hue=\"blue\").set(\n button_primary_background_fill=\"*primary_200\",\n button_primary_background_fill_hover=\"*primary_300\",\n)\n```\n\nIn the example above, we've set the `button_primary_background_fill` and `button_primary_background_fill_hover` variables to `*primary_200` and `*primary_300`. These variables will be set to the 200 and 300 brightness values of the blue primary color palette, respectively.\n\nSimilarly, to reference a core size, use the `*spacing_`, `*radius_`, or `*text_` prefix, followed by the size value. For example:\n\n```python\ntheme = gr.themes.Default(radius_size=\"md\").set(\n button_primary_border_radius=\"*radius_xl\",\n)\n```\n\nIn the example above, we've set the `button_primary_border_radius` variable to `*radius_xl`. This variable will be set to the `xl` setting of the medium radius size range.\n\n#### Referencing Other Variables\n\nVariables can also reference each other. 
For example, look at the example below:\n\n```python\ntheme = gr.themes.Default().set(\n button_primary_background_fill=\"#FF0000\",\n button_primary_background_fill_hover=\"#FF0000\",\n button_primary_border=\"#FF0000\",\n)\n```\n\nHaving to set these values to a common color is a bit tedious. Instead, we can reference the `button_primary_background_fill` variable in the `button_primary_background_fill_hover` and `button_primary_border` variables, using a `*` prefix. \n\n```python\ntheme = gr.themes.Default().set(\n button_primary_background_fill=\"#FF0000\",\n button_primary_background_fill_hover=\"*button_primary_background_fill\",\n button_primary_border=\"*button_primary_background_fill\",\n)\n```\n\nNow, if we change the `button_primary_background_fill` variable, the `button_primary_background_fill_hover` and `button_primary_border` variables will automatically update as well.\n\nThis is particularly useful if you intend to share your theme - it makes it easy to modify the theme without having to change every variable.\n\nNote that dark mode variables automatically reference each other. For example:\n\n```python\ntheme = gr.themes.Default().set(\n button_primary_background_fill=\"#FF0000\",\n button_primary_background_fill_dark=\"#AAAAAA\",\n button_primary_border=\"*button_primary_background_fill\",\n button_primary_border_dark=\"*button_primary_background_fill_dark\",\n)\n```\n\n`button_primary_border_dark` will draw its value from `button_primary_background_fill_dark`, because dark mode always draw from the dark version of the variable.\n\n## Creating a Full Theme\n\nLet's say you want to create a theme from scratch! We'll go through it step by step - you can also see the source of prebuilt themes in the gradio source repo for reference - [here's the source](https://github.com/gradio-app/gradio/blob/main/gradio/themes/monochrome.py) for the Monochrome theme.\n\nOur new theme class will inherit from `gradio.themes.Base`, a theme that sets a lot of convenient defaults. Let's make a simple demo that creates a dummy theme called Seafoam, and make a simple app that uses it.\n\n```python\nimport gradio as gr\nfrom gradio.themes.base import Base\nimport time\n\nclass Seafoam(Base):\n pass\n\nseafoam = Seafoam()\n\nwith gr.Blocks(theme=seafoam) as demo:\n textbox = gr.Textbox(label=\"Name\")\n slider = gr.Slider(label=\"Count\", minimum=0, maximum=100, step=1)\n with gr.Row():\n button = gr.Button(\"Submit\", variant=\"primary\")\n clear = gr.Button(\"Clear\")\n output = gr.Textbox(label=\"Output\")\n\n def repeat(name, count):\n time.sleep(3)\n return name * count\n \n button.click(repeat, [textbox, slider], output)\n\ndemo.launch()\n```\n\n
\n\n
\n\n\nThe Base theme is very barebones, and uses `gr.themes.Blue` as it primary color - you'll note the primary button and the loading animation are both blue as a result. Let's change the defaults core arguments of our app. We'll overwrite the constructor and pass new defaults for the core constructor arguments.\n\nWe'll use `gr.themes.Emerald` as our primary color, and set secondary and neutral hues to `gr.themes.Blue`. We'll make our text larger using `text_lg`. We'll use `Quicksand` as our default font, loaded from Google Fonts. \n\n```python\nfrom __future__ import annotations\nfrom typing import Iterable\nimport gradio as gr\nfrom gradio.themes.base import Base\nfrom gradio.themes.utils import colors, fonts, sizes\nimport time\n\n\nclass Seafoam(Base):\n def __init__(\n self,\n *,\n primary_hue: colors.Color | str = colors.emerald,\n secondary_hue: colors.Color | str = colors.blue,\n neutral_hue: colors.Color | str = colors.gray,\n spacing_size: sizes.Size | str = sizes.spacing_md,\n radius_size: sizes.Size | str = sizes.radius_md,\n text_size: sizes.Size | str = sizes.text_lg,\n font: fonts.Font\n | str\n | Iterable[fonts.Font | str] = (\n fonts.GoogleFont(\"Quicksand\"),\n \"ui-sans-serif\",\n \"sans-serif\",\n ),\n font_mono: fonts.Font\n | str\n | Iterable[fonts.Font | str] = (\n fonts.GoogleFont(\"IBM Plex Mono\"),\n \"ui-monospace\",\n \"monospace\",\n ),\n ):\n super().__init__(\n primary_hue=primary_hue,\n secondary_hue=secondary_hue,\n neutral_hue=neutral_hue,\n spacing_size=spacing_size,\n radius_size=radius_size,\n text_size=text_size,\n font=font,\n font_mono=font_mono,\n )\n\n\nseafoam = Seafoam()\n\nwith gr.Blocks(theme=seafoam) as demo:\n textbox = gr.Textbox(label=\"Name\")\n slider = gr.Slider(label=\"Count\", minimum=0, maximum=100, step=1)\n with gr.Row():\n button = gr.Button(\"Submit\", variant=\"primary\")\n clear = gr.Button(\"Clear\")\n output = gr.Textbox(label=\"Output\")\n\n def repeat(name, count):\n time.sleep(3)\n return name * count\n\n button.click(repeat, [textbox, slider], output)\n\ndemo.launch()\n\n```\n\n
\n\n
\n\nSee how the primary button and the loading animation are now green? These CSS variables are tied to the `primary_hue` variable. \n\nLet's modify the theme a bit more directly. We'll call the `set()` method to overwrite CSS variable values explicitly. We can use any CSS logic, and reference our core constructor arguments using the `*` prefix.\n\n```python\nfrom __future__ import annotations\nfrom typing import Iterable\nimport gradio as gr\nfrom gradio.themes.base import Base\nfrom gradio.themes.utils import colors, fonts, sizes\nimport time\n\n\nclass Seafoam(Base):\n def __init__(\n self,\n *,\n primary_hue: colors.Color | str = colors.emerald,\n secondary_hue: colors.Color | str = colors.blue,\n neutral_hue: colors.Color | str = colors.blue,\n spacing_size: sizes.Size | str = sizes.spacing_md,\n radius_size: sizes.Size | str = sizes.radius_md,\n text_size: sizes.Size | str = sizes.text_lg,\n font: fonts.Font\n | str\n | Iterable[fonts.Font | str] = (\n fonts.GoogleFont(\"Quicksand\"),\n \"ui-sans-serif\",\n \"sans-serif\",\n ),\n font_mono: fonts.Font\n | str\n | Iterable[fonts.Font | str] = (\n fonts.GoogleFont(\"IBM Plex Mono\"),\n \"ui-monospace\",\n \"monospace\",\n ),\n ):\n super().__init__(\n primary_hue=primary_hue,\n secondary_hue=secondary_hue,\n neutral_hue=neutral_hue,\n spacing_size=spacing_size,\n radius_size=radius_size,\n text_size=text_size,\n font=font,\n font_mono=font_mono,\n )\n super().set(\n body_background_fill=\"repeating-linear-gradient(45deg, *primary_200, *primary_200 10px, *primary_50 10px, *primary_50 20px)\",\n body_background_fill_dark=\"repeating-linear-gradient(45deg, *primary_800, *primary_800 10px, *primary_900 10px, *primary_900 20px)\",\n button_primary_background_fill=\"linear-gradient(90deg, *primary_300, *secondary_400)\",\n button_primary_background_fill_hover=\"linear-gradient(90deg, *primary_200, *secondary_300)\",\n button_primary_text_color=\"white\",\n button_primary_background_fill_dark=\"linear-gradient(90deg, *primary_600, *secondary_800)\",\n slider_color=\"*secondary_300\",\n slider_color_dark=\"*secondary_600\",\n block_title_text_weight=\"600\",\n block_border_width=\"3px\",\n block_shadow=\"*shadow_drop_lg\",\n button_shadow=\"*shadow_drop_lg\",\n button_large_padding=\"32px\",\n )\n\n\nseafoam = Seafoam()\n\nwith gr.Blocks(theme=seafoam) as demo:\n textbox = gr.Textbox(label=\"Name\")\n slider = gr.Slider(label=\"Count\", minimum=0, maximum=100, step=1)\n with gr.Row():\n button = gr.Button(\"Submit\", variant=\"primary\")\n clear = gr.Button(\"Clear\")\n output = gr.Textbox(label=\"Output\")\n\n def repeat(name, count):\n time.sleep(3)\n return name * count\n\n button.click(repeat, [textbox, slider], output)\n\ndemo.launch()\n\n```\n
\n\n
\n\n\nLook how fun our theme looks now! With just a few variable changes, our theme looks completely different.\n\nYou may find it helpful to explore the [source code of the other prebuilt themes](https://github.com/gradio-app/gradio/blob/main/gradio/themes) to see how they modified the base theme. You can also find your browser's Inspector useful to select elements from the UI and see what CSS variables are being used in the styles panel. \n\n## Sharing Themes\n\nOnce you have created a theme, you can upload it to the HuggingFace Hub to let others view it, use it, and build off of it!\n\n### Uploading a Theme\nThere are two ways to upload a theme, via the theme class instance or the command line. We will cover both of them with the previously created `seafoam` theme.\n\n* Via the class instance\n\nEach theme instance has a method called `push_to_hub` we can use to upload a theme to the HuggingFace hub.\n\n```python\nseafoam.push_to_hub(repo_name=\"seafoam\",\n version=\"0.0.1\",\n\t\t\t\t\thf_token=\"\")\n```\n\n* Via the command line\n\nFirst save the theme to disk\n```python\nseafoam.dump(filename=\"seafoam.json\")\n```\n\nThen use the `upload_theme` command:\n\n```bash\nupload_theme\\\n\"seafoam.json\"\\\n\"seafoam\"\\\n--version \"0.0.1\"\\\n--hf_token \"\"\n```\n\nIn order to upload a theme, you must have a HuggingFace account and pass your [Access Token](https://huggingface.co/docs/huggingface_hub/quick-start#login)\nas the `hf_token` argument. However, if you log in via the [HuggingFace command line](https://huggingface.co/docs/huggingface_hub/quick-start#login) (which comes installed with `gradio`),\nyou can omit the `hf_token` argument.\n\nThe `version` argument lets you specify a valid [semantic version](https://www.geeksforgeeks.org/introduction-semantic-versioning/) string for your theme.\nThat way your users are able to specify which version of your theme they want to use in their apps. This also lets you publish updates to your theme without worrying\nabout changing how previously created apps look. The `version` argument is optional. If omitted, the next patch version is automatically applied.\n\n### Theme Previews\n\nBy calling `push_to_hub` or `upload_theme`, the theme assets will be stored in a [HuggingFace space](https://huggingface.co/docs/hub/spaces-overview).\n\nThe theme preview for our seafoam theme is here: [seafoam preview](https://huggingface.co/spaces/gradio/seafoam).\n\n
\n\n
\n\n### Discovering Themes\n\nThe [Theme Gallery](https://huggingface.co/spaces/gradio/theme-gallery) shows all the public gradio themes. After publishing your theme,\nit will automatically show up in the theme gallery after a couple of minutes. \n\nYou can sort the themes by the number of likes on the space and from most to least recently created as well as toggling themes between light and dark mode.\n\n
\n\n
\n\n### Downloading\nTo use a theme from the hub, use the `from_hub` method on the `ThemeClass` and pass it to your app:\n\n```python\nmy_theme = gr.Theme.from_hub(\"gradio/seafoam\")\n\nwith gr.Blocks(theme=my_theme) as demo:\n ....\n```\n\nYou can also pass the theme string directly to `Blocks` or `Interface` (`gr.Blocks(theme=\"gradio/seafoam\")`)\n\nYou can pin your app to an upstream theme version by using semantic versioning expressions.\n\nFor example, the following would ensure the theme we load from the `seafoam` repo was between versions `0.0.1` and `0.1.0`:\n\n```python\nwith gr.Blocks(theme=\"gradio/seafoam@>=0.0.1,<0.1.0\") as demo:\n ....\n```\n\nEnjoy creating your own themes! If you make one you're proud of, please share it with the world by uploading it to the hub! \nIf you tag us on [Twitter](https://twitter.com/gradio) we can give your theme a shout out! \n\n\n", "html": "

Theming

\n\n

Introduction

\n\n

Gradio features a built-in theming engine that lets you customize the look and feel of your app. You can choose from a variety of themes, or create your own. To do so, pass the theme= kwarg to the Blocks or Interface constructor. For example:

\n\n
with gr.Blocks(theme=gr.themes.Soft()) as demo:\n    ...\n
\n\n
\n\n
\n\n

Gradio comes with a set of prebuilt themes which you can load from gr.themes.*. These are:

\n\n
    \n
  • gr.themes.Base()
  • \n
  • gr.themes.Default()
  • \n
  • gr.themes.Glass()
  • \n
  • gr.themes.Monochrome()
  • \n
  • gr.themes.Soft()
  • \n
\n\n

Each of these themes sets values for hundreds of CSS variables. You can use prebuilt themes as a starting point for your own custom themes, or you can create your own themes from scratch. Let's take a look at each approach.

\n\n

Using the Theme Builder

\n\n

The easiest way to build a theme is using the Theme Builder. To launch the Theme Builder locally, run the following code:

\n\n
import gradio as gr\n\ngr.themes.builder()\n
\n\n\n\nYou can use the Theme Builder running on Spaces above, though it runs much faster when you launch it locally via `gr.themes.builder()`. \n\nAs you edit the values in the Theme Builder, the app will preview updates in real time. You can download the code to generate the theme you've created so you can use it in any Gradio app.\n\nIn the rest of the guide, we will cover building themes programmatically.\n\n## Extending Themes via the Constructor\n\nAlthough each theme has hundreds of CSS variables, the values for most of these variables are drawn from 8 core variables which can be set through the constructor of each prebuilt theme. Modifying these 8 arguments allows you to quickly change the look and feel of your app.\n\n### Core Colors\n\nThe first 3 constructor arguments set the colors of the theme and are `gradio.themes.Color` objects. Internally, these Color objects hold brightness values for the palette of a single hue, ranging from 50, 100, 200..., 800, 900, 950. Other CSS variables are derived from these 3 colors.\n\nThe 3 color constructor arguments are:\n\n- `primary_hue`: This is the color that draws attention in your theme. In the default theme, this is set to `gradio.themes.colors.orange`.\n- `secondary_hue`: This is the color that is used for secondary elements in your theme. In the default theme, this is set to `gradio.themes.colors.blue`.\n- `neutral_hue`: This is the color that is used for text and other neutral elements in your theme. In the default theme, this is set to `gradio.themes.colors.gray`.\n\nYou could modify these values using their string shortcuts, such as\n\n
with gr.Blocks(theme=gr.themes.Default(primary_hue=\"red\", secondary_hue=\"pink\")) as demo:\n    ...\n
\n\nor you could use the `Color` objects directly, like this:\n\n
with gr.Blocks(theme=gr.themes.Default(primary_hue=gr.themes.colors.red, secondary_hue=gr.themes.colors.pink)) as demo:\n    ...\n
\n
\n\n
\n\n

Predefined colors are:

\n\n
    \n
  • slate
  • \n
  • gray
  • \n
  • zinc
  • \n
  • neutral
  • \n
  • stone
  • \n
  • red
  • \n
  • orange
  • \n
  • amber
  • \n
  • yellow
  • \n
  • lime
  • \n
  • green
  • \n
  • emerald
  • \n
  • teal
  • \n
  • cyan
  • \n
  • sky
  • \n
  • blue
  • \n
  • indigo
  • \n
  • violet
  • \n
  • purple
  • \n
  • fuchsia
  • \n
  • pink
  • \n
  • rose
  • \n
\n\n

You could also create your own custom Color objects and pass them in.

\n\n
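For example, a sketch of a hand-rolled palette (the hex values are arbitrary; this assumes, as with the built-in palettes, one hex value per brightness level from c50 to c950):
\n\n
forest = gr.themes.Color(\n    c50=\"#eefaf1\", c100=\"#d6f2de\", c200=\"#aee4c0\", c300=\"#7fd19c\",\n    c400=\"#52ba79\", c500=\"#2f9e5e\", c600=\"#23804b\", c700=\"#1c653d\",\n    c800=\"#174f31\", c900=\"#123f28\", c950=\"#0a2417\", name=\"forest\",\n)\n\nwith gr.Blocks(theme=gr.themes.Default(primary_hue=forest)) as demo:\n    ...\n
\n\n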

Core Sizing

\n\n

The next 3 constructor arguments set the sizing of the theme and are gradio.themes.Size objects. Internally, these Size objects hold pixel size values that range from xxs to xxl. Other CSS variables are derived from these 3 sizes.

\n\n
    \n
  • spacing_size: This sets the padding within and spacing between elements. In the default theme, this is set to gradio.themes.sizes.spacing_md.
  • \n
  • radius_size: This sets the roundedness of corners of elements. In the default theme, this is set to gradio.themes.sizes.radius_md.
  • \n
  • text_size: This sets the font size of text. In the default theme, this is set to gradio.themes.sizes.text_md.
  • \n
\n\n

You could modify these values using their string shortcuts, such as

\n\n
with gr.Blocks(theme=gr.themes.Default(spacing_size=\"sm\", radius_size=\"none\")) as demo:\n    ...\n
\n\nor you could use the `Size` objects directly, like this:\n\n
with gr.Blocks(theme=gr.themes.Default(spacing_size=gr.themes.sizes.spacing_sm, radius_size=gr.themes.sizes.radius_none)) as demo:\n    ...\n
\n
\n\n
\n\n

The predefined size objects are:

\n\n
    \n
  • radius_none
  • \n
  • radius_sm
  • \n
  • radius_md
  • \n
  • radius_lg
  • \n
  • spacing_sm
  • \n
  • spacing_md
  • \n
  • spacing_lg
  • \n
  • text_sm
  • \n
  • text_md
  • \n
  • text_lg
  • \n
\n\n

You could also create your own custom Size objects and pass them in.

\n\n
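For example, a sketch of a custom spacing scale (the values are arbitrary; this assumes one CSS length per step from xxs to xxl, matching the built-in sizes):
\n\n
chunky_spacing = gr.themes.Size(\n    xxs=\"2px\", xs=\"4px\", sm=\"6px\", md=\"10px\",\n    lg=\"14px\", xl=\"20px\", xxl=\"28px\", name=\"spacing_chunky\",\n)\n\nwith gr.Blocks(theme=gr.themes.Default(spacing_size=chunky_spacing)) as demo:\n    ...\n
\n\n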

Core Fonts

\n\n

The final 2 constructor arguments set the fonts of the theme. You can pass a list of fonts to each of these arguments to specify fallbacks. If you provide a string, it will be loaded as a system font. If you provide a gradio.themes.GoogleFont, the font will be loaded from Google Fonts.

\n\n
    \n
  • font: This sets the primary font of the theme. In the default theme, this is set to gradio.themes.GoogleFont(\"Source Sans Pro\").
  • \n
  • font_mono: This sets the monospace font of the theme. In the default theme, this is set to gradio.themes.GoogleFont(\"IBM Plex Mono\").
  • \n
\n\n

You could modify these values as in the following example:

\n\n
with gr.Blocks(theme=gr.themes.Default(font=[gr.themes.GoogleFont(\"Inconsolata\"), \"Arial\", \"sans-serif\"])) as demo:\n    ...\n
\n\n
\n\n
\n\n

Extending Themes via .set()

\n\n

You can also modify the values of CSS variables after the theme has been loaded. To do so, use the .set() method of the theme object to get access to the CSS variables. For example:

\n\n
theme = gr.themes.Default(primary_hue=\"blue\").set(\n    loader_color=\"#FF0000\",\n    slider_color=\"#FF0000\",\n)\n\nwith gr.Blocks(theme=theme) as demo:\n    ...\n
\n\nIn the example above, we've set the `loader_color` and `slider_color` variables to `#FF0000`, despite the overall `primary_color` using the blue color palette. You can set any CSS variable that is defined in the theme in this manner. \n\nYour IDE type hinting should help you navigate these variables. Since there are so many CSS variables, let's take a look at how these variables are named and organized.\n\n### CSS Variable Naming Conventions\n\nCSS variable names can get quite long, like `button_primary_background_fill_hover_dark`! However they follow a common naming convention that makes it easy to understand what they do and to find the variable you're looking for. Separated by underscores, the variable name is made up of:\n\n1. The target element, such as `button`, `slider`, or `block`.\n2. The target element type or sub-element, such as `button_primary`, or `block_label`.\n3. The property, such as `button_primary_background_fill`, or `block_label_border_width`.\n4. Any relevant state, such as `button_primary_background_fill_hover`.\n5. If the value is different in dark mode, the suffix `_dark`. For example, `input_border_color_focus_dark`.\n\nOf course, many CSS variable names are shorter than this, such as `table_border_color`, or `input_shadow`. \n\n### CSS Variable Organization\n\nThough there are hundreds of CSS variables, they do not all have to have individual values. They draw their values by referencing a set of core variables and referencing each other. This allows us to only have to modify a few variables to change the look and feel of the entire theme, while also getting finer control of individual elements that we may want to modify.\n\n#### Referencing Core Variables\n\nTo reference one of the core constructor variables, precede the variable name with an asterisk. To reference a core color, use the `*primary_`, `*secondary_`, or `*neutral_` prefix, followed by the brightness value. For example:\n\n
theme = gr.themes.Default(primary_hue=\"blue\").set(\n    button_primary_background_fill=\"*primary_200\",\n    button_primary_background_fill_hover=\"*primary_300\",\n)\n
\n\nIn the example above, we've set the `button_primary_background_fill` and `button_primary_background_fill_hover` variables to `*primary_200` and `*primary_300`. These variables will be set to the 200 and 300 brightness values of the blue primary color palette, respectively.\n\nSimilarly, to reference a core size, use the `*spacing_`, `*radius_`, or `*text_` prefix, followed by the size value. For example:\n\n
theme = gr.themes.Default(radius_size=\"md\").set(\n    button_primary_border_radius=\"*radius_xl\",\n)\n
\n\nIn the example above, we've set the `button_primary_border_radius` variable to `*radius_xl`. This variable will be set to the `xl` setting of the medium radius size range.\n\n#### Referencing Other Variables\n\nVariables can also reference each other. For example, look at the example below:\n\n
theme = gr.themes.Default().set(\n    button_primary_background_fill=\"#FF0000\",\n    button_primary_background_fill_hover=\"#FF0000\",\n    button_primary_border=\"#FF0000\",\n)\n
\n\nHaving to set these values to a common color is a bit tedious. Instead, we can reference the `button_primary_background_fill` variable in the `button_primary_background_fill_hover` and `button_primary_border` variables, using a `*` prefix. \n\n
theme = gr.themes.Default().set(\n    button_primary_background_fill=\"#FF0000\",\n    button_primary_background_fill_hover=\"*button_primary_background_fill\",\n    button_primary_border=\"*button_primary_background_fill\",\n)\n
\n\nNow, if we change the `button_primary_background_fill` variable, the `button_primary_background_fill_hover` and `button_primary_border` variables will automatically update as well.\n\nThis is particularly useful if you intend to share your theme - it makes it easy to modify the theme without having to change every variable.\n\nNote that dark mode variables automatically reference each other. For example:\n\n
theme = gr.themes.Default().set(\n    button_primary_background_fill=\"#FF0000\",\n    button_primary_background_fill_dark=\"#AAAAAA\",\n    button_primary_border=\"*button_primary_background_fill\",\n    button_primary_border_dark=\"*button_primary_background_fill_dark\",\n)\n
\n\n`button_primary_border_dark` will draw its value from `button_primary_background_fill_dark`, because dark mode always draw from the dark version of the variable.\n\n## Creating a Full Theme\n\nLet's say you want to create a theme from scratch! We'll go through it step by step - you can also see the source of prebuilt themes in the gradio source repo for reference - [here's the source](https://github.com/gradio-app/gradio/blob/main/gradio/themes/monochrome.py) for the Monochrome theme.\n\nOur new theme class will inherit from `gradio.themes.Base`, a theme that sets a lot of convenient defaults. Let's make a simple demo that creates a dummy theme called Seafoam, and make a simple app that uses it.\n\n
import gradio as gr\nfrom gradio.themes.base import Base\nimport time\n\nclass Seafoam(Base):\n    pass\n\nseafoam = Seafoam()\n\nwith gr.Blocks(theme=seafoam) as demo:\n    textbox = gr.Textbox(label=\"Name\")\n    slider = gr.Slider(label=\"Count\", minimum=0, maximum=100, step=1)\n    with gr.Row():\n        button = gr.Button(\"Submit\", variant=\"primary\")\n        clear = gr.Button(\"Clear\")\n    output = gr.Textbox(label=\"Output\")\n\n    def repeat(name, count):\n        time.sleep(3)\n        return name * count\n\n    button.click(repeat, [textbox, slider], output)\n\ndemo.launch()\n
\n\n
\n\n
\n\n

The Base theme is very barebones, and uses gr.themes.Blue as its primary color - you'll note the primary button and the loading animation are both blue as a result. Let's change the default core arguments of our app. We'll overwrite the constructor and pass new defaults for the core constructor arguments.

\n\n

We'll use gr.themes.Emerald as our primary color, and set secondary and neutral hues to gr.themes.Blue. We'll make our text larger using text_lg. We'll use Quicksand as our default font, loaded from Google Fonts.

\n\n
from __future__ import annotations\nfrom typing import Iterable\nimport gradio as gr\nfrom gradio.themes.base import Base\nfrom gradio.themes.utils import colors, fonts, sizes\nimport time\n\n\nclass Seafoam(Base):\n    def __init__(\n        self,\n        *,\n        primary_hue: colors.Color | str = colors.emerald,\n        secondary_hue: colors.Color | str = colors.blue,\n        neutral_hue: colors.Color | str = colors.gray,\n        spacing_size: sizes.Size | str = sizes.spacing_md,\n        radius_size: sizes.Size | str = sizes.radius_md,\n        text_size: sizes.Size | str = sizes.text_lg,\n        font: fonts.Font\n        | str\n        | Iterable[fonts.Font | str] = (\n            fonts.GoogleFont(\"Quicksand\"),\n            \"ui-sans-serif\",\n            \"sans-serif\",\n        ),\n        font_mono: fonts.Font\n        | str\n        | Iterable[fonts.Font | str] = (\n            fonts.GoogleFont(\"IBM Plex Mono\"),\n            \"ui-monospace\",\n            \"monospace\",\n        ),\n    ):\n        super().__init__(\n            primary_hue=primary_hue,\n            secondary_hue=secondary_hue,\n            neutral_hue=neutral_hue,\n            spacing_size=spacing_size,\n            radius_size=radius_size,\n            text_size=text_size,\n            font=font,\n            font_mono=font_mono,\n        )\n\n\nseafoam = Seafoam()\n\nwith gr.Blocks(theme=seafoam) as demo:\n    textbox = gr.Textbox(label=\"Name\")\n    slider = gr.Slider(label=\"Count\", minimum=0, maximum=100, step=1)\n    with gr.Row():\n        button = gr.Button(\"Submit\", variant=\"primary\")\n        clear = gr.Button(\"Clear\")\n    output = gr.Textbox(label=\"Output\")\n\n    def repeat(name, count):\n        time.sleep(3)\n        return name * count\n\n    button.click(repeat, [textbox, slider], output)\n\ndemo.launch()\n\n
\n\n
\n\n
\n\n

See how the primary button and the loading animation are now green? These CSS variables are tied to the primary_hue variable.

\n\n

Let's modify the theme a bit more directly. We'll call the set() method to overwrite CSS variable values explicitly. We can use any CSS logic, and reference our core constructor arguments using the * prefix.

\n\n
from __future__ import annotations\nfrom typing import Iterable\nimport gradio as gr\nfrom gradio.themes.base import Base\nfrom gradio.themes.utils import colors, fonts, sizes\nimport time\n\n\nclass Seafoam(Base):\n    def __init__(\n        self,\n        *,\n        primary_hue: colors.Color | str = colors.emerald,\n        secondary_hue: colors.Color | str = colors.blue,\n        neutral_hue: colors.Color | str = colors.blue,\n        spacing_size: sizes.Size | str = sizes.spacing_md,\n        radius_size: sizes.Size | str = sizes.radius_md,\n        text_size: sizes.Size | str = sizes.text_lg,\n        font: fonts.Font\n        | str\n        | Iterable[fonts.Font | str] = (\n            fonts.GoogleFont(\"Quicksand\"),\n            \"ui-sans-serif\",\n            \"sans-serif\",\n        ),\n        font_mono: fonts.Font\n        | str\n        | Iterable[fonts.Font | str] = (\n            fonts.GoogleFont(\"IBM Plex Mono\"),\n            \"ui-monospace\",\n            \"monospace\",\n        ),\n    ):\n        super().__init__(\n            primary_hue=primary_hue,\n            secondary_hue=secondary_hue,\n            neutral_hue=neutral_hue,\n            spacing_size=spacing_size,\n            radius_size=radius_size,\n            text_size=text_size,\n            font=font,\n            font_mono=font_mono,\n        )\n        super().set(\n            body_background_fill=\"repeating-linear-gradient(45deg, *primary_200, *primary_200 10px, *primary_50 10px, *primary_50 20px)\",\n            body_background_fill_dark=\"repeating-linear-gradient(45deg, *primary_800, *primary_800 10px, *primary_900 10px, *primary_900 20px)\",\n            button_primary_background_fill=\"linear-gradient(90deg, *primary_300, *secondary_400)\",\n            button_primary_background_fill_hover=\"linear-gradient(90deg, *primary_200, *secondary_300)\",\n            button_primary_text_color=\"white\",\n            button_primary_background_fill_dark=\"linear-gradient(90deg, *primary_600, *secondary_800)\",\n            slider_color=\"*secondary_300\",\n            slider_color_dark=\"*secondary_600\",\n            block_title_text_weight=\"600\",\n            block_border_width=\"3px\",\n            block_shadow=\"*shadow_drop_lg\",\n            button_shadow=\"*shadow_drop_lg\",\n            button_large_padding=\"32px\",\n        )\n\n\nseafoam = Seafoam()\n\nwith gr.Blocks(theme=seafoam) as demo:\n    textbox = gr.Textbox(label=\"Name\")\n    slider = gr.Slider(label=\"Count\", minimum=0, maximum=100, step=1)\n    with gr.Row():\n        button = gr.Button(\"Submit\", variant=\"primary\")\n        clear = gr.Button(\"Clear\")\n    output = gr.Textbox(label=\"Output\")\n\n    def repeat(name, count):\n        time.sleep(3)\n        return name * count\n\n    button.click(repeat, [textbox, slider], output)\n\ndemo.launch()\n\n
\n
\n\n
\n\n

Look how fun our theme looks now! With just a few variable changes, our theme looks completely different.

\n\n

You may find it helpful to explore the source code of the other prebuilt themes to see how they modified the base theme. You can also find your browser's Inspector useful to select elements from the UI and see what CSS variables are being used in the styles panel.

\n\n

Sharing Themes

\n\n

Once you have created a theme, you can upload it to the HuggingFace Hub to let others view it, use it, and build off of it!

\n\n

Uploading a Theme

\n\n

There are two ways to upload a theme, via the theme class instance or the command line. We will cover both of them with the previously created seafoam theme.

\n\n
    \n
  • Via the class instance
  • \n
\n\n

Each theme instance has a method called push_to_hub we can use to upload a theme to the HuggingFace hub.

\n\n
seafoam.push_to_hub(repo_name=\"seafoam\",\n                    version=\"0.0.1\",\n                    hf_token=\"\")\n
\n\n* Via the command line\n\nFirst save the theme to disk\n
seafoam.dump(filename=\"seafoam.json\")\n
\n\nThen use the `upload_theme` command:\n\n
upload_theme\\\n\"seafoam.json\"\\\n\"seafoam\"\\\n--version \"0.0.1\"\\\n--hf_token \"\"\n
\n\nIn order to upload a theme, you must have a HuggingFace account and pass your [Access Token](https://huggingface.co/docs/huggingface_hub/quick-start#login)\nas the `hf_token` argument. However, if you log in via the [HuggingFace command line](https://huggingface.co/docs/huggingface_hub/quick-start#login) (which comes installed with `gradio`),\nyou can omit the `hf_token` argument.\n\nThe `version` argument lets you specify a valid [semantic version](https://www.geeksforgeeks.org/introduction-semantic-versioning/) string for your theme.\nThat way your users are able to specify which version of your theme they want to use in their apps. This also lets you publish updates to your theme without worrying\nabout changing how previously created apps look. The `version` argument is optional. If omitted, the next patch version is automatically applied.\n\n### Theme Previews\n\nBy calling `push_to_hub` or `upload_theme`, the theme assets will be stored in a [HuggingFace space](https://huggingface.co/docs/hub/spaces-overview).\n\nThe theme preview for our seafoam theme is here: [seafoam preview](https://huggingface.co/spaces/gradio/seafoam).\n\n
\n\n
\n\n

Discovering Themes

\n\n

The Theme Gallery shows all the public gradio themes. After publishing your theme,\nit will automatically show up in the theme gallery after a couple of minutes.

\n\n

You can sort the themes by the number of likes on the space and from most to least recently created as well as toggling themes between light and dark mode.

\n\n
\n\n
\n\n

Downloading

\n\n

To use a theme from the hub, use the from_hub method on the ThemeClass and pass it to your app:

\n\n
my_theme = gr.Theme.from_hub(\"gradio/seafoam\")\n\nwith gr.Blocks(theme=my_theme) as demo:\n    ....\n
\n\n

You can also pass the theme string directly to Blocks or Interface (gr.Blocks(theme=\"gradio/seafoam\"))

\n\n

You can pin your app to an upstream theme version by using semantic versioning expressions.

\n\n

For example, the following would ensure the theme we load from the seafoam repo was between versions 0.0.1 and 0.1.0:

\n\n
with gr.Blocks(theme=\"gradio/seafoam@>=0.0.1,<0.1.0\") as demo:\n    ....\n
\n\n

Enjoy creating your own themes! If you make one you're proud of, please share it with the world by uploading it to the hub! \nIf you tag us on Twitter we can give your theme a shout out!

\n\n

\n", "tags": ["THEMES"], "spaces": [], "url": "/guides/theming-guide/", "contributor": null}, {"name": "using-flagging", "category": "other-tutorials", "pretty_category": "Other Tutorials", "guide_index": null, "absolute_index": 45, "pretty_name": "Using Flagging", "content": "# Using Flagging\n\n\n\n\n## Introduction\n\nWhen you demo a machine learning model, you might want to collect data from users who try the model, particularly data points in which the model is not behaving as expected. Capturing these \"hard\" data points is valuable because it allows you to improve your machine learning model and make it more reliable and robust.\n\nGradio simplifies the collection of this data by including a **Flag** button with every `Interface`. This allows a user or tester to easily send data back to the machine where the demo is running. In this Guide, we discuss more about how to use the flagging feature, both with `gradio.Interface` as well as with `gradio.Blocks`.\n\n## The **Flag** button in `gradio.Interface`\n\nFlagging with Gradio's `Interface` is especially easy. By default, underneath the output components, there is a button marked **Flag**. When a user testing your model sees input with interesting output, they can click the flag button to send the input and output data back to the machine where the demo is running. The sample is saved to a CSV log file (by default). If the demo involves images, audio, video, or other types of files, these are saved separately in a parallel directory and the paths to these files are saved in the CSV file.\n\nThere are [four parameters](https://gradio.app/docs/#interface-header) in `gradio.Interface` that control how flagging works. We will go over them in greater detail.\n\n* `allow_flagging`: this parameter can be set to either `\"manual\"` (default), `\"auto\"`, or `\"never\"`. \n * `manual`: users will see a button to flag, and samples are only flagged when the button is clicked.\n * `auto`: users will not see a button to flag, but every sample will be flagged automatically. \n * `never`: users will not see a button to flag, and no sample will be flagged. \n* `flagging_options`: this parameter can be either `None` (default) or a list of strings.\n * If `None`, then the user simply clicks on the **Flag** button and no additional options are shown.\n * If a list of strings are provided, then the user sees several buttons, corresponding to each of the strings that are provided. For example, if the value of this parameter is `[\"Incorrect\", \"Ambiguous\"]`, then buttons labeled **Flag as Incorrect** and **Flag as Ambiguous** appear. This only applies if `allow_flagging` is `\"manual\"`.\n * The chosen option is then logged along with the input and output.\n* `flagging_dir`: this parameter takes a string.\n * It represents what to name the directory where flagged data is stored.\n* `flagging_callback`: this parameter takes an instance of a subclass of the `FlaggingCallback` class\n * Using this parameter allows you to write custom code that gets run when the flag button is clicked\n * By default, this is set to an instance of `gr.CSVLogger`\n * One example is setting it to an instance of `gr.HuggingFaceDatasetSaver` which can allow you to pipe any flagged data into a HuggingFace Dataset. (See more below.)\n\n## What happens to flagged data?\n\nWithin the directory provided by the `flagging_dir` argument, a CSV file will log the flagged data. 
\n\nHere's an example: The code below creates the calculator interface embedded below it:\n\n```python\nimport gradio as gr\n\n\ndef calculator(num1, operation, num2):\n if operation == \"add\":\n return num1 + num2\n elif operation == \"subtract\":\n return num1 - num2\n elif operation == \"multiply\":\n return num1 * num2\n elif operation == \"divide\":\n return num1 / num2\n\n\niface = gr.Interface(\n calculator,\n [\"number\", gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]), \"number\"],\n \"number\",\n allow_flagging=\"manual\"\n)\n\niface.launch()\n```\n\n\n\nWhen you click the flag button above, the directory where the interface was launched will include a new flagged subfolder, with a csv file inside it. This csv file includes all the data that was flagged. \n\n```directory\n+-- flagged/\n| +-- logs.csv\n```\n_flagged/logs.csv_\n```csv\nnum1,operation,num2,Output,timestamp\n5,add,7,12,2022-01-31 11:40:51.093412\n6,subtract,1.5,4.5,2022-01-31 03:25:32.023542\n```\n\nIf the interface involves file data, such as for Image and Audio components, folders will be created to store those flagged data as well. For example an `image` input to `image` output interface will create the following structure.\n\n```directory\n+-- flagged/\n| +-- logs.csv\n| +-- image/\n| | +-- 0.png\n| | +-- 1.png\n| +-- Output/\n| | +-- 0.png\n| | +-- 1.png\n```\n_flagged/logs.csv_\n```csv\nim,Output timestamp\nim/0.png,Output/0.png,2022-02-04 19:49:58.026963\nim/1.png,Output/1.png,2022-02-02 10:40:51.093412\n```\n\nIf you wish for the user to provide a reason for flagging, you can pass a list of strings to the `flagging_options` argument of Interface. Users will have to select one of these choices when flagging, and the option will be saved as an additional column to the CSV.\n\nIf we go back to the calculator example, the following code will create the interface embedded below it. \n```python\niface = gr.Interface(\n calculator,\n [\"number\", gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]), \"number\"],\n \"number\",\n allow_flagging=\"manual\",\n flagging_options=[\"wrong sign\", \"off by one\", \"other\"]\n)\n\niface.launch()\n```\n\n\nWhen users click the flag button, the csv file will now include a column indicating the selected option.\n\n_flagged/logs.csv_\n```csv\nnum1,operation,num2,Output,flag,timestamp\n5,add,7,-12,wrong sign,2022-02-04 11:40:51.093412\n6,subtract,1.5,3.5,off by one,2022-02-04 11:42:32.062512\n```\n\n## The HuggingFaceDatasetSaver Callback\n\nSometimes, saving the data to a local CSV file doesn't make sense. For example, on Hugging Face\nSpaces, developers typically don't have access to the underlying ephemeral machine hosting the Gradio\ndemo. That's why, by default, flagging is turned off in Hugging Face Space. However,\nyou may want to do something else with the flagged data.\n\nWe've made this super easy with the `flagging_callback` parameter.\n\nFor example, below we're going to pipe flagged data from our calculator example into a Hugging Face Dataset, e.g. 
so that we can build a \"crowd-sourced\" dataset:\n\n\n```python\nimport os\n\nHF_TOKEN = os.getenv('HF_TOKEN')\nhf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, \"crowdsourced-calculator-demo\")\n\niface = gr.Interface(\n calculator,\n [\"number\", gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]), \"number\"],\n \"number\",\n description=\"Check out the crowd-sourced dataset at: [https://huggingface.co/datasets/aliabd/crowdsourced-calculator-demo](https://huggingface.co/datasets/aliabd/crowdsourced-calculator-demo)\",\n allow_flagging=\"manual\",\n flagging_options=[\"wrong sign\", \"off by one\", \"other\"],\n flagging_callback=hf_writer\n)\n\niface.launch()\n```\n\nNotice that we define our own \ninstance of `gradio.HuggingFaceDatasetSaver` using our Hugging Face token and\nthe name of a dataset we'd like to save samples to. In addition, we also set `allow_flagging=\"manual\"`\nbecause on Hugging Face Spaces, `allow_flagging` is set to `\"never\"` by default. Here's our demo:\n\n\n\nYou can now see all the examples flagged above in this [public Hugging Face dataset](https://huggingface.co/datasets/aliabd/crowdsourced-calculator-demo).\n\n![flagging callback hf](https://github.com/gradio-app/gradio/blob/main/guides/assets/flagging-callback-hf.png?raw=true)\n\nWe created the `gradio.HuggingFaceDatasetSaver` class, but you can pass your own custom class as long as it inherits from `FLaggingCallback` defined in [this file](https://github.com/gradio-app/gradio/blob/master/gradio/flagging.py). If you create a cool callback, contribute it to the repo! \n\n## Flagging with Blocks\n\nWhat about if you are using `gradio.Blocks`? On one hand, you have even more flexibility\nwith Blocks -- you can write whatever Python code you want to run when a button is clicked,\nand assign that using the built-in events in Blocks.\n\nAt the same time, you might want to use an existing `FlaggingCallback` to avoid writing extra code.\nThis requires two steps:\n\n1. You have to run your callback's `.setup()` somewhere in the code prior to the \nfirst time you flag data\n2. When the flagging button is clicked, then you trigger the callback's `.flag()` method,\nmaking sure to collect the arguments correctly and disabling the typical preprocessing. 
\n\nHere is an example with an image sepia filter Blocks demo that lets you flag\ndata using the default `CSVLogger`:\n\n```python\nimport numpy as np\nimport gradio as gr\n\ndef sepia(input_img, strength):\n sepia_filter = strength * np.array(\n [[0.393, 0.769, 0.189], [0.349, 0.686, 0.168], [0.272, 0.534, 0.131]]\n ) + (1-strength) * np.identity(3)\n sepia_img = input_img.dot(sepia_filter.T)\n sepia_img /= sepia_img.max()\n return sepia_img\n\ncallback = gr.CSVLogger()\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n img_input = gr.Image()\n strength = gr.Slider(0, 1, 0.5)\n img_output = gr.Image()\n with gr.Row():\n btn = gr.Button(\"Flag\")\n \n # This needs to be called at some point prior to the first call to callback.flag()\n callback.setup([img_input, strength, img_output], \"flagged_data_points\")\n\n img_input.change(sepia, [img_input, strength], img_output)\n strength.change(sepia, [img_input, strength], img_output)\n \n # We can choose which components to flag -- in this case, we'll flag all of them\n btn.click(lambda *args: callback.flag(args), [img_input, strength, img_output], None, preprocess=False)\n\ndemo.launch()\n\n```\n\n\n## Privacy\n\nImportant Note: please make sure your users understand when the data they submit is being saved, and what you plan on doing with it. This is especially important when you use `allow_flagging=auto` (when all of the data submitted through the demo is being flagged)\n\n### That's all! Happy building :) \n", "html": "

Using Flagging

\n\n

Introduction

\n\n

When you demo a machine learning model, you might want to collect data from users who try the model, particularly data points in which the model is not behaving as expected. Capturing these \"hard\" data points is valuable because it allows you to improve your machine learning model and make it more reliable and robust.

\n\n

Gradio simplifies the collection of this data by including a Flag button with every Interface. This allows a user or tester to easily send data back to the machine where the demo is running. In this Guide, we discuss more about how to use the flagging feature, both with gradio.Interface as well as with gradio.Blocks.

\n\n

The Flag button in gradio.Interface

\n\n

Flagging with Gradio's Interface is especially easy. By default, underneath the output components, there is a button marked Flag. When a user testing your model sees an input with an interesting output, they can click the flag button to send the input and output data back to the machine where the demo is running. The sample is saved to a CSV log file (by default). If the demo involves images, audio, video, or other types of files, these are saved separately in a parallel directory and the paths to these files are saved in the CSV file.

\n\n

There are four parameters in gradio.Interface that control how flagging works. We will go over them in greater detail.

\n\n
    \n
  • allow_flagging: this parameter can be set to either \"manual\" (default), \"auto\", or \"never\".
    • manual: users will see a button to flag, and samples are only flagged when the button is clicked.
    • auto: users will not see a button to flag, but every sample will be flagged automatically.
    • never: users will not see a button to flag, and no sample will be flagged.
  • flagging_options: this parameter can be either None (default) or a list of strings.
    • If None, then the user simply clicks on the Flag button and no additional options are shown.
    • If a list of strings is provided, then the user sees several buttons, corresponding to each of the strings that are provided. For example, if the value of this parameter is [\"Incorrect\", \"Ambiguous\"], then buttons labeled Flag as Incorrect and Flag as Ambiguous appear. This only applies if allow_flagging is \"manual\".
    • The chosen option is then logged along with the input and output.
  • flagging_dir: this parameter takes a string.
    • It represents what to name the directory where flagged data is stored.
  • flagging_callback: this parameter takes an instance of a subclass of the FlaggingCallback class.
    • Using this parameter allows you to write custom code that gets run when the flag button is clicked.
    • By default, this is set to an instance of gr.CSVLogger.
    • One example is setting it to an instance of gr.HuggingFaceDatasetSaver, which can allow you to pipe any flagged data into a Hugging Face Dataset. (See more below.)

A minimal sketch that combines these parameters appears right after this list.
\n\n
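To make these parameters concrete, here is a minimal sketch that combines them in a single Interface. The echo function, the my_flagged_data directory name, and the option labels are hypothetical placeholders; flagging_callback is shown explicitly only for illustration, since gr.CSVLogger is already the default.

```python
import gradio as gr


def echo(text):
    # Trivial placeholder function so the sketch is self-contained
    return text


demo = gr.Interface(
    echo,
    'text',
    'text',
    allow_flagging='manual',                      # show a Flag button; 'auto' logs every sample, 'never' disables flagging
    flagging_options=['Incorrect', 'Ambiguous'],  # users must pick one of these reasons when flagging
    flagging_dir='my_flagged_data',               # hypothetical folder name; logs.csv and any flagged files are written here
    flagging_callback=gr.CSVLogger(),             # the default callback, spelled out for illustration
)

demo.launch()
```

Swapping the callback for something like gr.HuggingFaceDatasetSaver (covered below) is the usual reason to touch flagging_callback at all.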

What happens to flagged data?

\n\n

Within the directory provided by the flagging_dir argument, a CSV file will log the flagged data.

\n\n

Here's an example: The code below creates the calculator interface embedded below it:

\n\n
import gradio as gr\n\n\ndef calculator(num1, operation, num2):\n    if operation == \"add\":\n        return num1 + num2\n    elif operation == \"subtract\":\n        return num1 - num2\n    elif operation == \"multiply\":\n        return num1 * num2\n    elif operation == \"divide\":\n        return num1 / num2\n\n\niface = gr.Interface(\n    calculator,\n    [\"number\", gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]), \"number\"],\n    \"number\",\n    allow_flagging=\"manual\"\n)\n\niface.launch()\n
\n\n

\n\n

When you click the flag button above, the directory where the interface was launched will include a new flagged subfolder, with a CSV file inside it. This CSV file includes all the data that was flagged.

\n\n
+-- flagged/\n|   +-- logs.csv\n
\n\n

flagged/logs.csv

\n\n
num1,operation,num2,Output,timestamp\n5,add,7,12,2022-01-31 11:40:51.093412\n6,subtract,1.5,4.5,2022-01-31 03:25:32.023542\n
\n\n
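If you want to inspect the flagged samples programmatically rather than opening the file by hand, you can load the log with pandas. A minimal sketch, assuming the default flagged/logs.csv path relative to where the demo was launched:

```python
import pandas as pd

# Load the flag log written by the default CSVLogger callback
logs = pd.read_csv('flagged/logs.csv')

# Each row is one flagged sample: the inputs, the output, and a timestamp
print(logs.head())
```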

If the interface involves file data, such as for Image and Audio components, folders will be created to store that flagged data as well. For example, an image input to image output interface will create the following structure.

\n\n
+-- flagged/\n|   +-- logs.csv\n|   +-- image/\n|   |   +-- 0.png\n|   |   +-- 1.png\n|   +-- Output/\n|   |   +-- 0.png\n|   |   +-- 1.png\n
\n\n

flagged/logs.csv

\n\n
im,Output,timestamp\nim/0.png,Output/0.png,2022-02-04 19:49:58.026963\nim/1.png,Output/1.png,2022-02-02 10:40:51.093412\n
\n\n

If you wish for the user to provide a reason for flagging, you can pass a list of strings to the flagging_options argument of Interface. Users will have to select one of these choices when flagging, and the option will be saved as an additional column to the CSV.

\n\n

If we go back to the calculator example, the following code will create the interface embedded below it.

\n\n
iface = gr.Interface(\n    calculator,\n    [\"number\", gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]), \"number\"],\n    \"number\",\n    allow_flagging=\"manual\",\n    flagging_options=[\"wrong sign\", \"off by one\", \"other\"]\n)\n\niface.launch()\n
\n\n

\n\n

When users click the flag button, the CSV file will now include a column indicating the selected option.

\n\n

flagged/logs.csv

\n\n
num1,operation,num2,Output,flag,timestamp\n5,add,7,-12,wrong sign,2022-02-04 11:40:51.093412\n6,subtract,1.5,3.5,off by one,2022-02-04 11:42:32.062512\n
\n\n

The HuggingFaceDatasetSaver Callback

\n\n

Sometimes, saving the data to a local CSV file doesn't make sense. For example, on Hugging Face\nSpaces, developers typically don't have access to the underlying ephemeral machine hosting the Gradio\ndemo. That's why, by default, flagging is turned off in Hugging Face Spaces. However,\nyou may want to do something else with the flagged data.

\n\n

We've made this super easy with the flagging_callback parameter.

\n\n

For example, below we're going to pipe flagged data from our calculator example into a Hugging Face Dataset so that we can build a \"crowd-sourced\" dataset:

\n\n
import os\n\nHF_TOKEN = os.getenv('HF_TOKEN')\nhf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, \"crowdsourced-calculator-demo\")\n\niface = gr.Interface(\n    calculator,\n    [\"number\", gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]), \"number\"],\n    \"number\",\n    description=\"Check out the crowd-sourced dataset at: [https://huggingface.co/datasets/aliabd/crowdsourced-calculator-demo](https://huggingface.co/datasets/aliabd/crowdsourced-calculator-demo)\",\n    allow_flagging=\"manual\",\n    flagging_options=[\"wrong sign\", \"off by one\", \"other\"],\n    flagging_callback=hf_writer\n)\n\niface.launch()\n
\n\n

Notice that we define our own \ninstance of gradio.HuggingFaceDatasetSaver using our Hugging Face token and\nthe name of a dataset we'd like to save samples to. In addition, we set allow_flagging=\"manual\"\nbecause on Hugging Face Spaces, allow_flagging is set to \"never\" by default. Here's our demo:

\n\n

\n\n

You can now see all the examples flagged above in this public Hugging Face dataset.

\n\n

\"flagging

\n\n

We created the gradio.HuggingFaceDatasetSaver class, but you can pass your own custom class as long as it inherits from FlaggingCallback defined in this file. If you create a cool callback, contribute it to the repo!
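For instance, here is a rough sketch of a custom callback that appends each flagged sample to a JSON Lines file instead of a CSV. The JSONLinesLogger name and the log.jsonl layout are made up for illustration; the setup() and flag() methods being overridden are the ones declared on FlaggingCallback in gradio/flagging.py, though their exact signatures may vary slightly between Gradio versions:

```python
import json
import os

from gradio.flagging import FlaggingCallback


class JSONLinesLogger(FlaggingCallback):
    # Hypothetical callback: writes one JSON object per flagged sample

    def setup(self, components, flagging_dir):
        # Called once, before the first flag; remember where to write
        self.components = components
        self.flagging_dir = flagging_dir
        os.makedirs(flagging_dir, exist_ok=True)
        self.log_file = os.path.join(flagging_dir, 'log.jsonl')

    def flag(self, flag_data, flag_option=None, flag_index=None, username=None):
        # Called every time a sample is flagged
        record = {
            'data': flag_data,
            'option': flag_option,
            'username': username,
        }
        with open(self.log_file, 'a') as f:
            f.write(json.dumps(record) + '\n')
        # The built-in loggers return a count of flagged samples;
        # a constant keeps this sketch simple
        return 1
```

You would then pass flagging_callback=JSONLinesLogger() to gr.Interface, just as hf_writer was passed in the example above.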

\n\n

Flagging with Blocks

\n\n

What if you are using gradio.Blocks? On one hand, you have even more flexibility\nwith Blocks -- you can write whatever Python code you want to run when a button is clicked,\nand assign that using the built-in events in Blocks.

\n\n

At the same time, you might want to use an existing FlaggingCallback to avoid writing extra code.\nThis requires two steps:

\n\n
    \n
  1. You have to run your callback's .setup() somewhere in the code prior to the \nfirst time you flag data.
  2. When the flagging button is clicked, you trigger the callback's .flag() method,\nmaking sure to collect the arguments correctly and to disable the typical preprocessing.
\n\n

Here is an example with an image sepia filter Blocks demo that lets you flag\ndata using the default CSVLogger:

\n\n
import numpy as np\nimport gradio as gr\n\ndef sepia(input_img, strength):\n    sepia_filter = strength * np.array(\n        [[0.393, 0.769, 0.189], [0.349, 0.686, 0.168], [0.272, 0.534, 0.131]]\n    ) + (1-strength) * np.identity(3)\n    sepia_img = input_img.dot(sepia_filter.T)\n    sepia_img /= sepia_img.max()\n    return sepia_img\n\ncallback = gr.CSVLogger()\n\nwith gr.Blocks() as demo:\n    with gr.Row():\n        with gr.Column():\n            img_input = gr.Image()\n            strength = gr.Slider(0, 1, 0.5)\n        img_output = gr.Image()\n    with gr.Row():\n        btn = gr.Button(\"Flag\")\n\n    # This needs to be called at some point prior to the first call to callback.flag()\n    callback.setup([img_input, strength, img_output], \"flagged_data_points\")\n\n    img_input.change(sepia, [img_input, strength], img_output)\n    strength.change(sepia, [img_input, strength], img_output)\n\n    # We can choose which components to flag -- in this case, we'll flag all of them\n    btn.click(lambda *args: callback.flag(args), [img_input, strength, img_output], None, preprocess=False)\n\ndemo.launch()\n\n
\n\n

\n\n

Privacy

\n\n

Important Note: please make sure your users understand when the data they submit is being saved, and what you plan on doing with it. This is especially important when you use allow_flagging=\"auto\" (when all of the data submitted through the demo is being flagged).
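One lightweight way to do this is to state it in the demo itself, for example via the description argument. A minimal sketch, reusing the calculator function from earlier with hypothetical disclosure wording:

```python
iface = gr.Interface(
    calculator,
    ['number', gr.Radio(['add', 'subtract', 'multiply', 'divide']), 'number'],
    'number',
    allow_flagging='auto',  # every submission is logged automatically
    description='Note: all inputs and outputs submitted to this demo are saved to help improve the model.',
)

iface.launch()
```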

\n\n

That's all! Happy building :)

\n", "tags": ["FLAGGING", "DATA"], "spaces": ["https://huggingface.co/spaces/gradio/calculator-flagging-crowdsourced", "https://huggingface.co/spaces/gradio/calculator-flagging-options", "https://huggingface.co/spaces/gradio/calculator-flag-basic"], "url": "/guides/using-flagging/", "contributor": null}]}]} \ No newline at end of file diff --git a/js/_website/src/routes/version.json b/js/_website/src/routes/version.json index 1c07274516f4a..122e6ea9d1aa5 100644 --- a/js/_website/src/routes/version.json +++ b/js/_website/src/routes/version.json @@ -1 +1 @@ -{ "version": "3.39.0" } +{"version": "3.39.0"} \ No newline at end of file