diff --git a/cspell.json b/cspell.json
index 15028cde4cc..abb83dd527d 100644
--- a/cspell.json
+++ b/cspell.json
@@ -1613,7 +1613,8 @@
     "voteField",
     "ampx",
     "autodetection",
-    "jamba"
+    "jamba",
+    "knowledgebases"
   ],
   "flagWords": ["hte", "full-stack", "Full-stack", "Full-Stack", "sudo"],
   "patterns": [
diff --git a/src/pages/[platform]/ai/conversation/context/index.mdx b/src/pages/[platform]/ai/conversation/context/index.mdx
new file mode 100644
index 00000000000..fac4cd0ac69
--- /dev/null
+++ b/src/pages/[platform]/ai/conversation/context/index.mdx
@@ -0,0 +1,182 @@
import { getCustomStaticPath } from "@/utils/getCustomStaticPath";

export const meta = {
  title: "Context",
  description:
    "How to pass client-side context to the LLM to help it respond.",
  platforms: [
    "javascript",
    "react-native",
    "angular",
    "nextjs",
    "react",
    "vue",
  ],
};

export const getStaticPaths = async () => {
  return getCustomStaticPath(meta.platforms);
};

export function getStaticProps(context) {
  return {
    props: {
      platform: context.params.platform,
      meta,
      showBreadcrumbs: false,
    },
  };
}

For LLMs to provide high-quality answers to users' questions, they need to have the right information. Sometimes this information is contextual, based on the user or the state of the application. To allow for this, you can send `aiContext` with any user message to the LLM, which can be any unstructured or structured data that might be useful.

```ts
import { generateClient } from "aws-amplify/data";
import type { Schema } from "../amplify/data/resource";

const client = generateClient<Schema>({ authMode: 'userPool' });

const { data: conversation } = await client.conversations.chat.create();

conversation.sendMessage({
  content: [{ text: "hello" }],
  // aiContext can be any shape
  aiContext: {
    username: "danny"
  }
});
```

In a React component using the `useAIConversation` hook, you can attach `aiContext` to the message you send:

```tsx
export default function Chat() {
  const [
    {
      data: { messages },
      isLoading,
    },
    sendMessage,
  ] = useAIConversation('chat');

  function handleSendMessage(message) {
    sendMessage({
      ...message,
      // this can be any object that can be stringified
      aiContext: {
        currentTime: new Date().toLocaleTimeString()
      }
    });
  }

  return (
    //...
  );
}
```

If you are using the `AIConversation` component, you can pass a function to its `aiContext` prop instead:

```tsx
function Chat() {
  const [
    {
      data: { messages },
      isLoading,
    },
    sendMessage,
  ] = useAIConversation('chat');

  return (
    <AIConversation
      messages={messages}
      isLoading={isLoading}
      handleSendMessage={sendMessage}
      aiContext={() => {
        return {
          currentTime: new Date().toLocaleTimeString(),
        };
      }}
    />
  );
}
```

The function passed to the `aiContext` prop is run immediately before the request is sent, in order to get the most up-to-date information.

You can use React context or other state management systems to update the data passed to `aiContext`. Using React context, we can provide more information about the current state of the application:

```tsx
// Create a context to share state across components
const DataContext = React.createContext<{
  data: any;
  setData: (value: React.SetStateAction<any>) => void;
}>({ data: {}, setData: () => {} });

// Create a component that updates the shared state
function Counter() {
  const { data, setData } = React.useContext(DataContext);
  const count = data.count ?? 0;
  return (
    <button onClick={() => setData({ ...data, count: count + 1 })}>
      {count}
    </button>
  );
}

// reference shared data in aiContext
function Chat() {
  const { data } = React.useContext(DataContext);
  const [
    {
      data: { messages },
      isLoading,
    },
    sendMessage,
  ] = useAIConversation('pirateChat');

  return (
    <AIConversation
      messages={messages}
      isLoading={isLoading}
      handleSendMessage={sendMessage}
      aiContext={() => {
        return {
          ...data,
          currentTime: new Date().toLocaleTimeString(),
        };
      }}
    />
  );
}

export default function Example() {
  const [data, setData] = React.useState({});
  return (
    <DataContext.Provider value={{ data, setData }}>
      <Counter />
      <Chat />
    </DataContext.Provider>
  );
}
```

diff --git a/src/pages/[platform]/ai/conversation/history/index.mdx b/src/pages/[platform]/ai/conversation/history/index.mdx
new file mode 100644
index 00000000000..f19fba4457b
--- /dev/null
+++ b/src/pages/[platform]/ai/conversation/history/index.mdx
@@ -0,0 +1,107 @@
import { getCustomStaticPath } from "@/utils/getCustomStaticPath";

export const meta = {
  title: "Conversation History",
  description:
    "Learn how the Amplify AI kit takes care of conversation history",
  platforms: [
    "javascript",
    "react-native",
    "angular",
    "nextjs",
    "react",
    "vue",
  ],
};

export const getStaticPaths = async () => {
  return getCustomStaticPath(meta.platforms);
};

export function getStaticProps(context) {
  return {
    props: {
      platform: context.params.platform,
      meta,
      showBreadcrumbs: false,
    },
  };
}

The Amplify AI kit automatically and securely stores conversation history per user so you can easily resume past conversations.

If you are looking for a quick way to get started with conversation history, [this example project](https://github.com/aws-samples/amplify-ai-examples/tree/main/claude-ai) has an interface similar to ChatGPT or Claude, where users see past conversations in a sidebar they can manage.

When you define a conversation route in your Amplify data schema, the Amplify AI kit turns it into two data models: `Conversation` and `Message`. The `Conversation` model functions mostly the same way as other data models defined in your schema. You can list and filter conversations (because they use owner-based authorization, users will only see their own conversations) and you can get a specific conversation by ID. Once you have a conversation instance, you can load the messages in it if there are any, send messages to it, and subscribe to the stream events being sent back.

## Listing conversations

To list all the conversations a user has, use the `.list()` method. It works the same way as any other Amplify data model. You can optionally pass a `limit` or `nextToken`.

```ts
const { data: conversations } = await client.conversations.chat.list()
```

The `updatedAt` field gets updated when new messages are sent, so you can use it to see which conversation had the most recent message. Conversations retrieved via `.list()` are sorted in descending order by `updatedAt`.

### Pagination

The result of `.list()` contains a `nextToken` property. This can be used to retrieve subsequent pages of conversations.

```ts
const { data: conversations, nextToken } = await client.conversations.chat.list();

// retrieve next page
if (nextToken) {
  const { data: nextPageConversations } = await client.conversations.chat.list({
    nextToken
  });
}
```

Conversations also have `name` and `metadata` fields you can use to more easily find and resume past conversations. `name` is a string and `metadata` is a JSON object, so you can store any extra information you need.
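
For example, you can set these fields when a conversation is created and use them to find it later. A short sketch (assuming `.create()` accepts `name` and `metadata` as shown; the values are illustrative):

```ts
// Start a conversation with a name and arbitrary metadata
const { data: namedConversation } = await client.conversations.chat.create({
  name: "Trip planning",
  metadata: { topic: "travel" },
});

// Later, find it again in the conversation list
const { data: conversations } = await client.conversations.chat.list();
const tripChat = conversations.find((c) => c.name === "Trip planning");
```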

## Resuming conversations

You can resume a conversation by calling the `.get()` method with a conversation ID. Both `.create()` and `.get()` return a conversation instance.

```ts
// list all conversations a user has
// make sure the user has been authenticated with Amplify Auth
const { data: conversations } = await client.conversations.chat.list();

// Retrieve a specific conversation
const { data: conversation } = await client.conversations.chat.get({ id: conversations[0].id });

// list the existing messages in the conversation
const { data: messages } = await conversation.listMessages();

// You can now send a message to the conversation
conversation.sendMessage({
  content: [
    { text: "hello" }
  ]
});
```

```tsx
export function Chat({ id }) {
  const [
    {
      data: { messages },
      isLoading,
    },
    handleSendMessage,
  ] = useAIConversation('chat', { id });
}
```

diff --git a/src/pages/[platform]/ai/conversation/index.mdx b/src/pages/[platform]/ai/conversation/index.mdx
new file mode 100644
index 00000000000..b9ed364ff48
--- /dev/null
+++ b/src/pages/[platform]/ai/conversation/index.mdx
@@ -0,0 +1,79 @@
import { getChildPageNodes } from '@/utils/getChildPageNodes';
import { getCustomStaticPath } from "@/utils/getCustomStaticPath";

export const meta = {
  title: "Conversation",
  description:
    "Learn about conversational AI patterns and how to implement them in Amplify.",
  route: '/[platform]/ai/conversation',
  platforms: [
    "javascript",
    "react-native",
    "angular",
    "nextjs",
    "react",
    "vue",
  ],
};

export const getStaticPaths = async () => {
  return getCustomStaticPath(meta.platforms);
};

export function getStaticProps(context) {
  const childPageNodes = getChildPageNodes(meta.route);
  return {
    props: {
      meta,
      childPageNodes,
      showBreadcrumbs: false,
    }
  };
}

The conversation route simplifies the creation of AI-powered conversation interfaces in your application. It automatically sets up the necessary AppSync API components and Lambda functions to handle streaming, multi-turn interactions with Amazon Bedrock foundation models.

## Key Components

1. **AppSync API**: Gateway to the conversation route.
   - Creates new conversation route instances.
   - Sends messages to a conversation route instance.
   - Subscribes to real-time updates for assistant responses.

2. **Lambda Function**: Bridge between AppSync and Amazon Bedrock.
   - Retrieves conversation instance history.
   - Invokes Bedrock's /converse endpoint.
   - Handles tool use responses by invoking AppSync queries.

3. **DynamoDB**: Stores conversation and message data.
   - Conversations are scoped to a specific application user.

## Authentication Flow

1. The user's OIDC access token is passed from the client to AppSync.
2. AppSync forwards this token to the Lambda function.
3. The Lambda function uses the token to authenticate requests back to AppSync.

## Usage Scenarios

Each of the following scenarios has safeguards in place to mitigate risks associated with invoking tools on behalf of the user, including:

- An Amazon CloudWatch log group that redacts OIDC access tokens from the Lambda function's logs.
- IAM policies that limit the Lambda function's ability to access other resources.

## Data Flow

1. The user sends a message via an AppSync mutation.
2. AppSync triggers the Lambda function (default or custom).
3. The Lambda function processes the message and invokes Bedrock's /converse endpoint.
   a. If the response is a tool use, the Lambda function invokes the applicable AppSync query.
4. The Lambda function sends the assistant's response back to AppSync.
5. AppSync sends the response to subscribed clients.

This design allows for real-time, scalable conversations while ensuring that the Lambda function's data access matches that of the application user.
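
The pages below cover each piece in depth. For reference, a minimal conversation route definition in `amplify/data/resource.ts` looks roughly like this (the model and system prompt are just examples):

```ts
import { a } from '@aws-amplify/backend';

const schema = a.schema({
  chat: a.conversation({
    aiModel: a.ai.model('Claude 3 Haiku'),
    systemPrompt: 'You are a helpful assistant',
  }),
});
```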

## Next Steps

<Overview childPageNodes={props.childPageNodes} />

diff --git a/src/pages/[platform]/ai/conversation/knowledge-base/index.mdx b/src/pages/[platform]/ai/conversation/knowledge-base/index.mdx
new file mode 100644
index 00000000000..c083c73aeee
--- /dev/null
+++ b/src/pages/[platform]/ai/conversation/knowledge-base/index.mdx
@@ -0,0 +1,148 @@
import { getCustomStaticPath } from "@/utils/getCustomStaticPath";

export const meta = {
  title: "Knowledge Base",
  description:
    "Knowledge bases can be used for retrieval augmented generation, or RAG",
  platforms: [
    "javascript",
    "react-native",
    "angular",
    "nextjs",
    "react",
    "vue",
  ],
};

export const getStaticPaths = async () => {
  return getCustomStaticPath(meta.platforms);
};

export function getStaticProps(context) {
  return {
    props: {
      platform: context.params.platform,
      meta,
      showBreadcrumbs: false,
    },
  };
}

[Bedrock knowledge bases](https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html) are a great way to implement Retrieval Augmented Generation, or RAG for short. RAG is a common pattern in building generative AI applications that involves storing a large amount of content, like documentation, in a vector database such as Postgres with pg_vector or OpenSearch.

The default setup for a Bedrock knowledge base is OpenSearch Serverless, which has a baseline cost whether or not you use it, so you can get a large AWS bill if you are not careful. If you are just testing this out, make sure to delete the OpenSearch Serverless instance when you are done.

## Create a knowledge base

To integrate a Bedrock knowledge base with your conversation route, first [create a Bedrock knowledge base](https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base-create.html) in the console, with the CLI, or with the CDK.

## Create a custom query and tool

```ts title="amplify/data/resource.ts"
import { type ClientSchema, a, defineData } from "@aws-amplify/backend";

const schema = a.schema({
  // highlight-start
  knowledgeBase: a
    .query()
    .arguments({ input: a.string() })
    .handler(
      a.handler.custom({
        dataSource: "KnowledgeBaseDataSource",
        entry: "./resolvers/kbResolver.js",
      }),
    )
    .returns(a.string())
    .authorization((allow) => allow.authenticated()),
  // highlight-end

  chat: a.conversation({
    aiModel: a.ai.model("Claude 3.5 Sonnet"),
    systemPrompt: `You are a helpful assistant.`,
    // highlight-start
    tools: [
      a.ai.dataTool({
        name: 'searchDocumentation',
        description: 'Performs a similarity search over the documentation for ...',
        query: a.ref('knowledgeBase'),
      }),
    ]
    // highlight-end
  })
})
```

## Write an AppSync resolver

Then you'll need to create a JavaScript AppSync resolver to connect the query to the knowledge base. You'll need to know the ID of the knowledge base you want to use, which you can find in the Bedrock console or with the AWS CLI.
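
For example, with the AWS CLI (assuming your credentials and default region are already configured):

```bash
# Lists the knowledge bases in your account; note the knowledgeBaseId field
aws bedrock-agent list-knowledge-bases
```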
```javascript title="amplify/data/resolvers/kbResolver.js"
export function request(ctx) {
  const { input } = ctx.args;
  return {
    resourcePath: "/knowledgebases/[KNOWLEDGE_BASE_ID]/retrieve",
    method: "POST",
    params: {
      headers: {
        "Content-Type": "application/json",
      },
      body: JSON.stringify({
        retrievalQuery: {
          text: input,
        },
      }),
    },
  };
}

export function response(ctx) {
  return JSON.stringify(ctx.result.body);
}
```

## Define the data source

Then, in your Amplify backend file, create the data source for the knowledge base query and give it permission to call the knowledge base. Note that the knowledge base Retrieve API is served by the `bedrock-agent-runtime` endpoint.

```ts title="amplify/backend.ts"
import * as cdk from 'aws-cdk-lib';
import { PolicyStatement } from 'aws-cdk-lib/aws-iam';
import { defineBackend } from '@aws-amplify/backend';
import { auth } from './auth/resource';
import { data } from './data/resource';

const backend = defineBackend({
  auth,
  data,
});

const KnowledgeBaseDataSource =
  backend.data.resources.graphqlApi.addHttpDataSource(
    "KnowledgeBaseDataSource",
    `https://bedrock-agent-runtime.${cdk.Stack.of(backend.data).region}.amazonaws.com`,
    {
      authorizationConfig: {
        signingRegion: cdk.Stack.of(backend.data).region,
        signingServiceName: "bedrock",
      },
    },
  );

KnowledgeBaseDataSource.grantPrincipal.addToPrincipalPolicy(
  new PolicyStatement({
    resources: [
      `arn:aws:bedrock:${cdk.Stack.of(backend.data).region}:[account ID]:knowledge-base/[knowledge base ID]`
    ],
    actions: ["bedrock:Retrieve"],
  }),
);
```
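
Once deployed, the LLM decides on its own when to invoke the `searchDocumentation` tool based on its description. A quick client-side sketch of exercising it (using the `client` from the earlier pages; the question is illustrative):

```ts
const { data: conversation } = await client.conversations.chat.create();

// A documentation question should lead the assistant to call the
// searchDocumentation tool before answering.
conversation.sendMessage({
  content: [{ text: "How do I add owner-based authorization to a model?" }],
});
```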

That's it!

diff --git a/src/pages/[platform]/ai/conversation/response-components/index.mdx b/src/pages/[platform]/ai/conversation/response-components/index.mdx
new file mode 100644
index 00000000000..aabb8ac2475
--- /dev/null
+++ b/src/pages/[platform]/ai/conversation/response-components/index.mdx
@@ -0,0 +1,170 @@
import { getCustomStaticPath } from "@/utils/getCustomStaticPath";

export const meta = {
  title: "Response components",
  description:
    "Create rich conversational interfaces with custom UI responses",
  platforms: [
    "nextjs",
    "react",
  ],
};

export const getStaticPaths = async () => {
  return getCustomStaticPath(meta.platforms);
};

export function getStaticProps(context) {
  return {
    props: {
      platform: context.params.platform,
      meta,
      showBreadcrumbs: false,
    },
  };
}

Response components are custom UI components you can define that the AI assistant can respond with. They allow you to build conversational interfaces that are more than just text in and text out.

## How it works

The `AIConversation` component takes the response components and turns them into [tool](/[platform]/ai/concepts/tools) configurations to send to the LLM. The tool configurations get sent when a user message is sent to the backend, and the backend Lambda merges the tools coming from the client with any [schema tools](/[platform]/ai/conversation/tools). The LLM sees that it can invoke a UI component "tool" with certain input/props. If the LLM chooses to use a response component tool, a message gets sent to the client with the response component name and props. The `AIConversation` component then tries to render the React component provided with the props the LLM sends.

It is important to know that the LLM is NOT writing raw code that is sent to the browser and evaluated.

## Structure

The `responseComponents` prop on the `AIConversation` component takes a named object where the keys are the component names and the values are the component definitions.

A response component has:

* `description`: A description of the component. The more descriptive, the easier it is for the LLM to know when to use it.
* `component`: The React component to render. The props of the component should match the `props` definition.
* `props`: The props for the React component in [JSON Schema format](https://json-schema.org/understanding-json-schema/reference).

```tsx
<AIConversation
  messages={messages}
  isLoading={isLoading}
  handleSendMessage={sendMessage}
  // highlight-start
  responseComponents={{
    WeatherCard: {
      description: "Used to display the weather of a city to the user",
      component: ({ city }) => {
        return (
          <div>{city}</div>
        );
      },
      props: {
        city: {
          type: "string",
          required: true,
          description: "The name of the city to display the weather for",
        },
      },
    },
  }}
  // highlight-end
/>
```

## Passing context back to the assistant

When a user sends a message to the AI assistant from the client, you can optionally send `aiContext` with the message. `aiContext` is any information about the current state of the client application that might be useful for the AI assistant to know so it can respond better. It could be things like the user's name, or the current state of the application such as what page the user is currently on. AI context is a plain object that will get stringified and sent to the AI assistant with the next user message.

You can use the `aiContext` to let the AI assistant know what was rendered in a response component so it has more context to respond with. Because a UI component can have state, and data that is not included in its props, adding that information to the `aiContext` of the next message is a good way to let the AI know what was actually displayed to the user.

```tsx
// Create a context to share state across components
// highlight-start
const DataContext = React.createContext<{
  data: any;
  setData: (value: React.SetStateAction<any>) => void;
}>({ data: {}, setData: () => {} });

function WeatherCard({ city }: { city: string }) {
  const { setData } = React.useContext(DataContext);

  React.useEffect(() => {
    // fetch some weather data
    // set the data context
    setData({
      city,
      // weather info
    });
  }, [city]);

  return (
    <div>{city}</div>
  );
}
// highlight-end

function Chat() {
  const { data } = React.useContext(DataContext);
  const [
    {
      data: { messages },
      isLoading,
    },
    sendMessage,
  ] = useAIConversation('chat');

  return (
    <AIConversation
      messages={messages}
      isLoading={isLoading}
      handleSendMessage={sendMessage}
      responseComponents={{
        WeatherCard: {
          description: "Used to display the weather of a city to the user",
          component: WeatherCard,
          props: {
            city: { type: "string", required: true },
          },
        },
      }}
      // highlight-start
      aiContext={() => {
        return {
          ...data,
        };
      }}
      // highlight-end
    />
  );
}

export default function Example() {
  const [data, setData] = React.useState({});
  return (
    <DataContext.Provider value={{ data, setData }}>
      <Chat />
    </DataContext.Provider>
  );
}
```

## Fallback

Because response components are defined at runtime in your React code, but conversation history is stored in a database, there can be times when a message in the conversation history references a response component you no longer have. To handle these situations, there is a `FallbackResponseComponent` prop you can use.

```tsx
<AIConversation
  messages={messages}
  isLoading={isLoading}
  handleSendMessage={sendMessage}
  // highlight-start
  FallbackResponseComponent={(props) => {
    return <>{JSON.stringify(props)}</>;
  }}
  // highlight-end
/>
```

diff --git a/src/pages/[platform]/ai/conversation/tools/index.mdx b/src/pages/[platform]/ai/conversation/tools/index.mdx
new file mode 100644
index 00000000000..2a67d927288
--- /dev/null
+++ b/src/pages/[platform]/ai/conversation/tools/index.mdx
@@ -0,0 +1,264 @@
import { getCustomStaticPath } from "@/utils/getCustomStaticPath";

export const meta = {
  title: "Tools",
  description:
    "Tools allow LLMs to take action or query information so they can respond with up-to-date information.",
  platforms: [
    "javascript",
    "react-native",
    "angular",
    "nextjs",
    "react",
    "vue",
  ],
};

export const getStaticPaths = async () => {
  return getCustomStaticPath(meta.platforms);
};

export function getStaticProps(context) {
  return {
    props: {
      platform: context.params.platform,
      meta,
      showBreadcrumbs: false,
    },
  };
}

Tools allow LLMs to take action or query information so they can respond with up-to-date information. There are a few different ways to define LLM tools in the Amplify AI kit:

1. Model tools
2. Query tools
3. Lambda tools

The easiest way to define tools for the LLM to use is with data models and custom queries in your data schema. When you define tools in your data schema, Amplify takes care of the heavy lifting required to properly implement them, such as:

* **Describing the tools to the LLM:** because each tool is a custom query or data model defined in the schema, Amplify knows the input shape needed for that tool.
* **Invoking the tool with the right parameters:** when the LLM responds that it wants to call a tool, the code that initially called the LLM runs that tool with the parameters the LLM provided.
* **Maintaining the caller identity and authorization:** we don't want users to have access to more data through the LLM than they normally would, so when the LLM wants to invoke a tool, it is called with the user's identity. For example, if the LLM wanted to invoke a query to list Todos, it would only return the user's todos and not everyone's todos.

## Model tools

You can give the LLM access to your data models by referencing them in an `a.ai.dataTool()` with a reference to a model in your data schema.

```ts
const schema = a.schema({
  Post: a.model({
    title: a.string(),
    body: a.string(),
  })
  .authorization(allow => allow.owner()),

  chat: a.conversation({
    aiModel: a.ai.model('Claude 3 Haiku'),
    systemPrompt: 'Hello, world!',
    tools: [
      a.ai.dataTool({
        name: 'PostQuery',
        description: 'Searches for Post records',
        model: a.ref('Post'),
        modelOperation: 'list',
      }),
    ],
  }),
})
```
This will let the LLM list and filter `Post` records. Because the data schema has all the information about the shape of a `Post` record, the data tool provides that information to the LLM so you don't have to. The Amplify AI kit also handles authorizing tool use requests based on the caller's identity, which means that for an owner-based model like this one, the LLM will only be able to query the current user's records.

*The only model operation currently supported is `list`.*

## Query tools

You can also give the LLM access to custom queries. To do so, define a custom query with a [Function](/[platform]/build-a-backend/functions/set-up-function/) handler and then reference that custom query as a tool.

```ts title="amplify/data/resource.ts"
// highlight-start
import { type ClientSchema, a, defineData, defineFunction } from "@aws-amplify/backend";
// highlight-end

// highlight-start
export const getWeather = defineFunction({
  name: 'getWeather',
  entry: 'getWeather.ts'
});
// highlight-end

const schema = a.schema({
  // highlight-start
  getWeather: a.query()
    .arguments({ city: a.string() })
    .returns(a.customType({
      value: a.integer(),
      unit: a.string()
    }))
    .handler(a.handler.function(getWeather))
    .authorization((allow) => allow.authenticated()),
  // highlight-end

  chat: a.conversation({
    aiModel: a.ai.model('Claude 3 Haiku'),
    systemPrompt: 'You are a helpful assistant',
    // highlight-start
    tools: [
      a.ai.dataTool({
        name: 'getWeather',
        description: 'Gets the weather for a given city',
        query: a.ref('getWeather'),
      }),
    ]
    // highlight-end
  }),
});
```

Because the definition of the query itself has the shape of the inputs and outputs (arguments and returns), the Amplify data tool can automatically tell the LLM exactly how to call the custom query.

The description of the tool is very important to help the LLM know when to use it. The more descriptive you are about what the tool does, the better.

Here is an example Lambda function handler for the `getWeather` query:

```ts title="amplify/data/getWeather.ts"
import type { Schema } from "./resource";

export const handler: Schema["getWeather"]["functionHandler"] = async (
  event
) => {
  // This returns a mock value, but you can connect to any API, database, or other service
  return {
    value: 42,
    unit: 'C'
  };
}
```

Lastly, update your **`amplify/backend.ts`** file to include the newly defined `getWeather` function.

```ts title="amplify/backend.ts"
import { defineBackend } from "@aws-amplify/backend";
import { auth } from "./auth/resource";
import { data } from "./data/resource";
// highlight-start
import { getWeather } from "./data/resource";
// highlight-end

const backend = defineBackend({
  auth,
  data,
  // highlight-start
  getWeather
  // highlight-end
});
```
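
Nothing else is needed on the client; the LLM decides when to call the tool. A quick sketch of exercising it (using the `client` from the earlier pages; the question is illustrative):

```ts
const { data: conversation } = await client.conversations.chat.create();

// A weather question should lead the assistant to invoke the getWeather
// query, with the caller's identity, before it answers.
conversation.sendMessage({
  content: [{ text: "What's the weather in Seattle?" }],
});
```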

## Connect to any AWS Service

You can connect to any AWS service by defining a custom query and calling that service in the function handler. You will then need to give the Lambda function the permissions required to call that AWS service.

```ts title="amplify/backend.ts"
import { defineBackend } from "@aws-amplify/backend";
import { auth } from "./auth/resource";
import { data } from "./data/resource";
import { storage } from "./storage/resource";
import { getWeather } from "./functions/getWeather/resource";
import { PolicyStatement } from "aws-cdk-lib/aws-iam";

const backend = defineBackend({
  auth,
  data,
  storage,
  getWeather
});

backend.getWeather.resources.lambda.addToRolePolicy(
  new PolicyStatement({
    resources: ["[resource arn]"],
    actions: ["[action]"],
  }),
);
```

## Custom Lambda Tools

Conversation routes can also have completely custom tools defined in a Lambda handler.

### Create a custom conversation handler function

```ts title="amplify/data/resource.ts"
import { type ClientSchema, a, defineData } from '@aws-amplify/backend';
import { defineConversationHandlerFunction } from '@aws-amplify/backend-ai/conversation';

const chatHandler = defineConversationHandlerFunction({
  entry: './chatHandler.ts',
  name: 'customChatHandler',
  models: [
    { modelId: a.ai.model("Claude 3 Haiku") }
  ]
});

const schema = a.schema({
  chat: a.conversation({
    aiModel: a.ai.model('Claude 3 Haiku'),
    systemPrompt: "You are a helpful assistant",
    handler: chatHandler,
  })
})
```

### Implement the custom handler

```ts title="amplify/data/chatHandler.ts"
import {
  ConversationTurnEvent,
  ExecutableTool,
  handleConversationTurnEvent,
} from '@aws-amplify/ai-constructs/conversation/runtime';
import { ToolResultContentBlock } from '@aws-sdk/client-bedrock-runtime';

const thermometer: ExecutableTool = {
  name: 'thermometer',
  description: 'Returns current temperature in a city',
  execute: (input): Promise<ToolResultContentBlock> => {
    if (input.city === 'Seattle') {
      return Promise.resolve({
        text: `75F`,
      });
    }
    return Promise.resolve({
      text: 'unknown'
    });
  },
  inputSchema: {
    json: {
      type: 'object',
      properties: {
        city: {
          type: 'string',
          description: 'The city name'
        }
      },
      required: ['city']
    }
  }
};

/**
 * Handler with simple tool.
 */
export const handler = async (event: ConversationTurnEvent) => {
  await handleConversationTurnEvent(event, {
    tools: [thermometer],
  });
};
```
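
From the client, the custom handler is used the same way as the default one; the LLM invokes the `thermometer` tool on its own when a question calls for it. A minimal sketch (using the `client` from the earlier pages; the question is illustrative):

```ts
const { data: conversation } = await client.conversations.chat.create();

// A temperature question should cause the handler to execute the
// thermometer tool and fold its result into the assistant's reply.
conversation.sendMessage({
  content: [{ text: "How warm is it in Seattle right now?" }],
});
```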