diff --git a/.gitignore b/.gitignore index d9bfb1db..e08d0e0c 100644 --- a/.gitignore +++ b/.gitignore @@ -11,7 +11,7 @@ node_modules/ *.lock *.map .github/workflows/nodejs.yml - +go.work.sum app/**/*.map config/**/*.map diff --git a/.prettierrc.js b/.prettierrc.cjs similarity index 100% rename from .prettierrc.js rename to .prettierrc.cjs diff --git a/app/app.less b/app/app.less index 3a75536f..9a7b0df9 100644 --- a/app/app.less +++ b/app/app.less @@ -1,227 +1,237 @@ @import './common.less'; @font-face { - font-family: 'Roboto-Black'; - src: url('@app/static/fonts/Roboto-Black.ttf') format('truetype'); + font-family: 'Roboto-Black'; + src: url('@app/static/fonts/Roboto-Black.ttf') format('truetype'); } @font-face { - font-family: 'Roboto-Bold'; - src: url('@app/static/fonts/Roboto-Bold.ttf') format('truetype'); + font-family: 'Roboto-Bold'; + src: url('@app/static/fonts/Roboto-Bold.ttf') format('truetype'); } @font-face { - font-family: 'Roboto-Light'; - src: url('@app/static/fonts/Roboto-Light.ttf') format('truetype'); + font-family: 'Roboto-Light'; + src: url('@app/static/fonts/Roboto-Light.ttf') format('truetype'); } @font-face { - font-family: 'Roboto-Medium'; - src: url('@app/static/fonts/Roboto-Medium.ttf') format('truetype'); + font-family: 'Roboto-Medium'; + src: url('@app/static/fonts/Roboto-Medium.ttf') format('truetype'); } @font-face { - font-family: 'Roboto-Regular'; - src: url('@app/static/fonts/Roboto-Regular.ttf') format('truetype'); + font-family: 'Roboto-Regular'; + src: url('@app/static/fonts/Roboto-Regular.ttf') format('truetype'); } @font-face { - font-family: 'Roboto-Mono'; - src: url('@app/static/fonts/Roboto-Mono.ttf') format('truetype'); + font-family: 'Roboto-Mono'; + src: url('@app/static/fonts/Roboto-Mono.ttf') format('truetype'); } #studioApp { - font-family: Roboto-Regular, sans-serif; + font-family: Roboto-Regular, sans-serif; } .studioCenterLayout { - width: @containerWidth; - margin: 0 auto; + width: @containerWidth; + margin: 0 
auto; } .studioTabHeader { - display: flex; - justify-content: center; - padding-bottom: 16px; + display: flex; + justify-content: center; + padding-bottom: 16px; } .ant-radio-group.studioTabGroup { - background: @lightGray; - border-radius: 20px; - padding: 4px; - min-width: 400px; - display: flex; - justify-content: center; - - .ant-radio-button-wrapper { - border-radius: 20px; - flex: 1; - text-align: center; - border: none; - white-space: nowrap; - color: @darkBlue; - - &.ant-radio-button-wrapper-checked { - color: #fff; - } - - &:not(.ant-radio-button-wrapper-checked) { - background: none; - } - - &::before { - width: 0; - } - } + background: @lightGray; + border-radius: 20px; + padding: 4px; + min-width: 400px; + display: flex; + justify-content: center; + + .ant-radio-button-wrapper { + border-radius: 20px; + flex: 1; + text-align: center; + border: none; + white-space: nowrap; + color: @darkBlue; + + &.ant-radio-button-wrapper-checked { + color: #fff; + } + + &:not(.ant-radio-button-wrapper-checked) { + background: none; + } + + &::before { + width: 0; + } + } } .ant-btn { - font-family: Roboto-Regular, sans-serif; + font-family: Roboto-Regular, sans-serif; } .ant-btn.warningBtn { - color: @red; - border-color: @red; - display: inline-flex; - align-items: center; - justify-content: center; - border-radius: 3px; + color: @red; + border-color: @red; + display: inline-flex; + align-items: center; + justify-content: center; + border-radius: 3px; - svg { - width: 20px; - height: 20px; - } + svg { + width: 20px; + height: 20px; + } - &.ant-btn-link { - border: none; - } + &.ant-btn-link { + border: none; + } - &:hover { - color: @red; - border-color: @red; - } + &:hover { + color: @red; + border-color: @red; + } } .ant-btn.primaryBtn { - color: @blue; - border-color: @blue; - display: inline-flex; - align-items: center; - justify-content: center; - border-radius: 3px; + color: @blue; + border-color: @blue; + display: inline-flex; + align-items: center; + 
justify-content: center; + border-radius: 3px; - svg { - width: 20px; - height: 20px; - } + svg { + width: 20px; + height: 20px; + } - &.ant-btn-link { - border: none; - } + &.ant-btn-link { + border: none; + } - &:hover { - color: @blue; - border-color: @blue; - } + &:hover { + color: @blue; + border-color: @blue; + } } .ant-btn.cancelBtn { - color: @darkGray; - border-color: @darkGray; - display: inline-flex; - align-items: center; - justify-content: center; - border-radius: 3px; + color: @darkGray; + border-color: @darkGray; + display: inline-flex; + align-items: center; + justify-content: center; + border-radius: 3px; - svg { - width: 20px; - height: 20px; - } + svg { + width: 20px; + height: 20px; + } - &.ant-btn-link { - border: none; - } + &.ant-btn-link { + border: none; + } - &:hover { - color: @darkGray; - border-color: @darkGray; - } + &:hover { + color: @darkGray; + border-color: @darkGray; + } } .ant-btn.studioAddBtn { - border-radius: 3px; - display: inline-flex; - align-items: center; - padding: 0 29px; + border-radius: 3px; + display: inline-flex; + align-items: center; + padding: 0 29px; - a { - display: flex; - align-items: center; - } + a { + display: flex; + align-items: center; + } - .studioAddBtnIcon { - display: inline-flex; - margin-right: 10px; - height: 22px; + .studioAddBtnIcon { + display: inline-flex; + margin-right: 10px; + height: 22px; - > svg { - width: 22px; - height: 22px; - } + >svg { + width: 22px; + height: 22px; + } - & ~ span { - margin-left: 0; - } - } + &~span { + margin-left: 0; + } + } } .studioFormFooter { - position: fixed; - left: 0; - bottom: 0; - z-index: 10; - width: 100%; - height: 98px; - display: flex; - align-items: center; - justify-content: center; - background: #fff; - box-shadow: 0 -4px 4px rgba(0, 0, 0, 0.1); - - button { - width: 236px; - - &:not(:last-child) { - margin-right: 50px; - } - } + position: fixed; + left: 0; + bottom: 0; + z-index: 10; + width: 100%; + height: 98px; + display: flex; + 
align-items: center; + justify-content: center; + background: #fff; + box-shadow: 0 -4px 4px rgba(0, 0, 0, 0.1); + + button { + width: 236px; + + &:not(:last-child) { + margin-right: 50px; + } + } } .ant-form-item-label label { - font-family: Roboto-Bold, sans-serif; + font-family: Roboto-Bold, sans-serif; } .ant-table .ant-table-row { - background: #fff; + background: #fff; } // for thoes who use command pattern to create a Modal and other components // just `ConfigProvider` is not enough .ant-modal .ant-modal-content { - padding: 0; - .ant-modal-header { - padding: 16px 24px; - border-bottom: 1px solid #f0f0f0; - margin-bottom: 0; - } - .ant-modal-body { - padding: 24px; - .ant-modal-confirm-body-wrapper { - padding: 24px 0; - } - } - .ant-modal-footer { - padding: 10px 16px; - border-top: 1px solid #f0f0f0; - } + padding: 0; + + .ant-modal-header { + padding: 16px 24px; + border-bottom: 1px solid #f0f0f0; + margin-bottom: 0; + } + + .ant-modal-body { + padding: 24px; + + .ant-modal-confirm-body-wrapper { + padding: 24px 0; + } + } + + .ant-modal-footer { + padding: 10px 16px; + border-top: 1px solid #f0f0f0; + } } + +.ant-btn-default { + border: 1px solid @blue; + border-radius: 3px; + color: @blue; +} \ No newline at end of file diff --git a/app/components/FileConfigSetting/index.tsx b/app/components/FileConfigSetting/index.tsx index c995e861..24b17237 100644 --- a/app/components/FileConfigSetting/index.tsx +++ b/app/components/FileConfigSetting/index.tsx @@ -66,7 +66,7 @@ const FileConfigSetting = (props: IProps) => { const readFile = useCallback( debounce(() => { const { activeItem, setState } = state; - if (!activeItem) return; + if (!activeItem || !(activeItem.path.indexOf('.csv') > -1)) return; setState({ loading: true }); let content = []; if (activeItem.sample !== undefined) { @@ -93,7 +93,10 @@ const FileConfigSetting = (props: IProps) => { content = [...content, row.data]; }, complete: () => { - setState({ loading: false, previewContent: content }); + 
setState({ + loading: false, + previewContent: content, + }); }, }); } @@ -226,11 +229,12 @@ const FileConfigSetting = (props: IProps) => { ), key: 'withHeader', width: '30%', - render: (record) => ( - updateItem(e, record)}> - {intl.get('import.hasHeader')} - - ), + render: (record) => + record.path.indexOf('.csv') > -1 && ( + updateItem(e, record)}> + {intl.get('import.hasHeader')} + + ), }, { title: ( @@ -248,9 +252,10 @@ const FileConfigSetting = (props: IProps) => { ), key: 'delimiter', width: '30%', - render: (record) => ( - updateDelimiter(e, record)} /> - ), + render: (record) => + record.path.indexOf('.csv') > -1 && ( + updateDelimiter(e, record)} /> + ), }, { key: 'operation', diff --git a/app/config/locale/en-US.ts b/app/config/locale/en-US.ts index 7cecfcd0..5ff7f86f 100644 --- a/app/config/locale/en-US.ts +++ b/app/config/locale/en-US.ts @@ -69,6 +69,9 @@ export default { keyword: 'Keyword', function: 'Function', historyRecord: 'History', + language: 'Language', + switchOn: 'On', + switchOff: 'Off', }, doc: { welcome: 'Welcome to', @@ -168,6 +171,8 @@ export default { historyTip: 'You can directly use the "/" key in the console to quickly select historical query statements.', runSelectionRows: 'Run selected rows', selectEmpty: 'Please select the row', + send: 'Send', + copy2NGQL: 'Copy to Console', }, explore: { vertexStyle: 'Vertex Color', @@ -530,4 +535,32 @@ export default { alwaysShow: 'Always show the welcome page', progressTitle: 'Download & Import Data', }, + setting: { + globalSetting: 'Global Settings', + betaFunction: 'Beta Functions', + viewScmemaBetaFunDesc: + 'View the schema structure of the specified graph space, located at Schema-> Operations-> View Schema', + text2query: 'Text to Query', + text2queryDesc: 'You can use the LLM capability to write query statements in the console', + llmImport: 'AI Import', + llmImportDesc: 'Knowledge Graph Build with LLM and NebulaGraph, located at Import data -> New AI Import', + maxTextLength: 'Max 
Text Length', + verify: 'Verify', + }, + llm: { + newAiImport: 'New AI Import', + importTip: 'Please create the schema before creating an import task.', + setup: 'Setup', + confirm: 'Confirm', + file: 'File', + filePath: 'File Path', + importGraphSpace: 'Import Graph Space', + exportNGQLFilePath: 'Export NGQL File Path', + prompt: 'Prompt', + next: 'Next', + url: 'URL', + previous: 'Previous', + start: 'Start', + aiImport: 'AI Import', + }, }; diff --git a/app/config/locale/zh-CN.ts b/app/config/locale/zh-CN.ts index 40c5b090..7c55cf9f 100644 --- a/app/config/locale/zh-CN.ts +++ b/app/config/locale/zh-CN.ts @@ -69,6 +69,9 @@ export default { keyword: '关键字', function: '函数', historyRecord: '历史记录', + language: '语言', + switchOn: '开启', + switchOff: '关闭', }, doc: { welcome: '欢迎使用', @@ -162,6 +165,8 @@ export default { historyTip: '您可以直接输入“/”键快速选择历史查询语句。', runSelectionRows: '运行选中行', selectEmpty: '请先选择要运行的行', + send: '发送', + copy2NGQL: '复制到控制台', }, explore: { vertexStyle: '节点颜色', @@ -511,4 +516,30 @@ export default { alwaysShow: '始终展示欢迎页', progressTitle: '下载 & 导入数据', }, + setting: { + globalSetting: '全局设置', + betaFunction: 'Beta 功能', + viewScmemaBetaFunDesc: '查看指定图空间的 schema 结构,位于 Schema --> [图空间] --> 查看 Schema', + text2query: '文本转查询', + text2queryDesc: '在控制台中使用 LLM 功能,将自然语言转换为查询语句', + llmImport: 'AI 导入', + llmImportDesc: '使用 LLM 和 Nebula Graph 构建知识图谱,位于导入数据 --> 新建 AI 导入', + maxTextLength: '文本最大长度', + verify: '确认', + }, + llm: { + newAiImport: 'New AI Import', + importTip: '请先创建图空间和 schema', + setup: '设置', + confirm: '确认', + file: '文件', + filePath: '文件路径', + importGraphSpace: '导入图空间', + exportNGQLFilePath: '导出 NGQL 文件路径', + prompt: '提示', + next: '下一步', + previous: '上一步', + start: '开始', + aiImport: 'AI 导入', + }, }; diff --git a/app/interfaces/import.ts b/app/interfaces/import.ts index 6a5668ea..223079c4 100644 --- a/app/interfaces/import.ts +++ b/app/interfaces/import.ts @@ -1,3 +1,4 @@ +import { ILLMStatus } from '@app/pages/Import/TaskList/TaskItem/AIImportItem'; import { 
RcFile } from 'antd/lib/upload'; export enum ITaskStatus { @@ -36,6 +37,30 @@ export interface ITaskItem { message: string; stats: ITaskStats; rawConfig: string; + llmJob?: ILLMJob; +} + +export interface ILLMJob { + user_name: string; + host: string; + job_id: string; + space: string; + space_schema_string: string; + file?: string; + file_path?: string; + job_type: string; + status: ILLMStatus; + prompt_template: string; + process: { + total: number; + current: number; + ratio: number; + failed_reason: string; + prompt_tokens: number; + completion_tokens: number; + }; + update_time: string; + create_time: string; } export interface IPropertyProps { diff --git a/app/pages/Console/Setting.tsx b/app/pages/Console/Setting.tsx new file mode 100644 index 00000000..c64263e8 --- /dev/null +++ b/app/pages/Console/Setting.tsx @@ -0,0 +1,93 @@ +import { Radio, Form, Input, Modal, Checkbox, InputNumber, Switch } from 'antd'; +import { useEffect } from 'react'; +import { post } from '@app/utils/http'; +import llm from '@app/stores/llm'; +import { observer } from 'mobx-react-lite'; +import { useI18n } from '@vesoft-inc/i18n'; +import styles from './index.module.less'; + +function Setting({ open, setVisible }) { + const [form] = Form.useForm(); + const { intl } = useI18n(); + const onClose = () => { + setVisible(false); + }; + const onOk = async () => { + const values = await form.validateFields(); + const { url, key, llmVersion, apiType, ...config } = values; + const res = await post('/api/config/llm')({ + url, + key, + llmVersion, + apiType, + config: JSON.stringify(config), + }); + if (res.code === 0) { + setVisible(false); + llm.setConfig(values); + } + }; + + useEffect(() => { + if (!open) return; + initForm(); + }, [open]); + + async function initForm() { + await llm.fetchConfig(); + form.setFieldsValue(llm.config); + } + return ( + + + + + + + + + LLM2nGQL + BETA + + + + + + Copilot + BETA + + + + + + + + + + + + + azure + openai + + + + + llm3.5-turbo + llm4 + + + + + 
{intl.get('console.useSpaceSchema')} + {intl.get('console.useConsoleNGQL')} + + + + + + + + ); +} +export default observer(Setting); diff --git a/app/pages/Import/AIImport/Create.tsx b/app/pages/Import/AIImport/Create.tsx new file mode 100644 index 00000000..f07a797d --- /dev/null +++ b/app/pages/Import/AIImport/Create.tsx @@ -0,0 +1,182 @@ +import { useStore } from '@app/stores'; +import { useI18n } from '@vesoft-inc/i18n'; +import { Button, Form, Input, Modal, Radio, Select, Tabs, message } from 'antd'; +import { observer } from 'mobx-react-lite'; +import styles from './index.module.less'; +import Icon from '@app/components/Icon'; +import { useEffect, useMemo, useState } from 'react'; +import { llmImportTask, llmImportPrompt } from '@app/stores/llm'; +import { getByteLength } from '@app/utils/function'; +import { post } from '@app/utils/http'; + +const Create = observer((props: { visible: boolean; onCancel: () => void }) => { + const { llm, schema, files } = useStore(); + const { fileList } = files; + const { intl } = useI18n(); + const [form] = Form.useForm(); + const [type, setType] = useState('file'); + const [step, setStep] = useState(0); + const [file, setFile] = useState(null); + const [space, setSpace] = useState(null); + const [tokens, setTokens] = useState(0); + const valuse = useMemo(() => { + return form.getFieldsValue(); + }, [step]); + useEffect(() => { + if (!props.visible) return; + llm.fetchConfig(); // refetch for update config + files.getFiles(); + setStep(0); + form.resetFields(); + form.setFieldsValue({ + type: 'file', + promptTemplate: llmImportPrompt, + }); + }, [props.visible]); + + const onNext = () => { + form.validateFields().then((values) => { + setStep(1); + }); + }; + + useEffect(() => { + (async () => { + if (file && space) { + const types = { + csv: 0.9, + json: 0.6, + pdf: 0.05, + }; + const subfix = file.name.split('.').pop(); + const type = types[subfix] || 0.5; + const size = file.size; + const schema = await 
llm.getSpaceSchema(space); + const schemaBytesLength = getByteLength(schema); + // full connection + const tokensNum = ((((schemaBytesLength * size) / 2000) * llm.config.maxContextLength) / 2000) * type; + setTokens(tokensNum); + } + })(); + }, [file, space]); + + const onConfirm = async () => { + const values = form.getFieldsValue(); + const schema = await llm.getSpaceSchema(space); + post('/api/llm/import/job')({ + type, + ...values, + spaceSchemaString: schema, + }).then((res) => { + if (res.code === 0) { + message.success(intl.get('common.success')); + props.onCancel(); + } + }); + }; + + return ( + + + + {intl.get('llm.importTip')} + + + { + setStep(0); + }} + > + + {intl.get('llm.setup')} + + + + + {intl.get('llm.confirm')} + + + {tokens !== 0 && ( + + 🅣 prompt token: ~ + {Math.ceil(tokens / 10000)}w + + )} + + + {llm.config.features.includes('aiImportFilePath') && ( + + { + setType(e.target.value); + form.setFieldValue('file', undefined); + }} + > + {intl.get('llm.file')} + {intl.get('llm.filePath')} + + + )} + + {type === 'file' ? ( + + { + setFile(option?.data); + }} + > + {fileList.map((item) => ( + + {item.name} + + ))} + + + ) : ( + + + + )} + + + setSpace(v)}> + {schema.spaces.map((item) => ( + {item} + ))} + + + + + + + + + + + + + {type === 'file' ? 
valuse.file : valuse.filePath} + + + + + + + {intl.get('common.cancel')} + {step == 1 && setStep(0)}>{intl.get('llm.previous')}} + {step == 0 && ( + + {intl.get('llm.next')} + + )} + {step == 1 && ( + + {intl.get('llm.start')} + + )} + + + ); +}); +export default Create; diff --git a/app/pages/Import/AIImport/index.module.less b/app/pages/Import/AIImport/index.module.less new file mode 100644 index 00000000..2a5b0096 --- /dev/null +++ b/app/pages/Import/AIImport/index.module.less @@ -0,0 +1,67 @@ +.buttonArea { + display: flex; + align-items: center; + justify-content: flex-end; + gap: 10px; +} + +.tips { + display: flex; + padding: 10px 20px; + align-items: flex-start; + gap: 10px; + align-self: stretch; + background-color: #D4EBFF; + border-radius: 11px; + font-size: 16px; + font-style: normal; + font-weight: 500; + + svg { + color: #0D8BFF; + width: 24px; + height: 24px; + } +} + +.step { + display: flex; + align-items: center; + justify-content: center; + gap: 10px; + margin: 20px 0; + color: #0D8BFF; + + >div { + cursor: pointer; + display: flex; + align-items: center; + gap: 5px; + font-size: 16px; + + svg { + width: 35px; + height: 35px; + } + } + + >span { + + + width: 197px; + height: 1px; + display: inline-block; + border-bottom: 1px dashed #D4D4D8; + } +} + +.tokenNum { + position: absolute; + top: 200px; + right: 20px; + display: flex; + align-items: center; + gap: 5px; + justify-content: center; + color: #71717A; +} \ No newline at end of file diff --git a/app/pages/Import/DatasourceList/DatasourceConfig/FileUploadBtn/index.tsx b/app/pages/Import/DatasourceList/DatasourceConfig/FileUploadBtn/index.tsx index b098b280..a326aa68 100644 --- a/app/pages/Import/DatasourceList/DatasourceConfig/FileUploadBtn/index.tsx +++ b/app/pages/Import/DatasourceList/DatasourceConfig/FileUploadBtn/index.tsx @@ -9,8 +9,8 @@ import { getFileSize } from '@app/utils/file'; import FileConfigSetting from '@app/components/FileConfigSetting'; import styles from 
'./index.module.less'; type IUploadBtnProps = PropsWithChildren<{ - onUpload?: () => void -}> + onUpload?: () => void; +}>; const SizeLimit = 200 * 1024 * 1024; const UploadBtn = (props: IUploadBtnProps) => { @@ -21,12 +21,17 @@ const UploadBtn = (props: IUploadBtnProps) => { const { fileList, uploadFile } = files; const [visible, setVisible] = useState(false); const transformFile = async (_file: StudioFile, fileList: StudioFile[]) => { - const bigFiles = fileList.filter(file => file.size > SizeLimit); - if(bigFiles.length > 0) { - message.error(intl.get('import.fileSizeLimit', { name: bigFiles.map(i => i.name).join(', '), size: getFileSize(SizeLimit) })); + const bigFiles = fileList.filter((file) => file.size > SizeLimit); + if (bigFiles.length > 0) { + message.error( + intl.get('import.fileSizeLimit', { + name: bigFiles.map((i) => i.name).join(', '), + size: getFileSize(SizeLimit), + }), + ); return false; } - fileList.forEach(file => { + fileList.forEach((file) => { file.path = `${file.name}`; file.withHeader = false; file.delimiter = ','; @@ -46,7 +51,6 @@ const UploadBtn = (props: IUploadBtnProps) => { <> {}} @@ -63,11 +67,12 @@ const UploadBtn = (props: IUploadBtnProps) => { className={styles.uploadModal} footer={false} > - setVisible(false)} /> + onCancel={() => setVisible(false)} + /> > ); diff --git a/app/pages/Import/TaskList/TaskItem/AIImportItem.tsx b/app/pages/Import/TaskList/TaskItem/AIImportItem.tsx new file mode 100644 index 00000000..32ed844d --- /dev/null +++ b/app/pages/Import/TaskList/TaskItem/AIImportItem.tsx @@ -0,0 +1,231 @@ +import { Button, Popconfirm, Progress, Tooltip, message as antMsg } from 'antd'; +import { CheckCircleFilled } from '@ant-design/icons'; +import { ITaskItem } from '@app/interfaces/import'; +import dayjs from 'dayjs'; +import { getFileSize } from '@app/utils/file'; +import Icon from '@app/components/Icon'; +import { useI18n } from '@vesoft-inc/i18n'; +import { observer } from 'mobx-react-lite'; +import { useStore } from 
'@app/stores'; +import styles from './index.module.less'; +import { _delete, post } from '@app/utils/http'; +import React from 'react'; +interface IProps { + data: ITaskItem; + onViewLog: (data: ITaskItem) => void; + onRefresh: () => void; +} +export enum ILLMStatus { + Running = 'running', + Success = 'success', + Failed = 'failed', + Cancel = 'cancel', + Pending = 'pending', +} +const llmStatusMap = { + [ILLMStatus.Running]: 'active', + [ILLMStatus.Success]: 'success', + [ILLMStatus.Failed]: 'execption', + [ILLMStatus.Cancel]: 'execption', + [ILLMStatus.Pending]: 'normal', +}; +const COLOR_MAP = { + success: { + from: '#8EDD3F', + to: '#27AE60', + }, + normal: { + from: '#8EDD3F', + to: '#27AE60', + }, + execption: { + from: '#EB5757', + to: '#EB5757', + }, + active: { + from: '#58D7FF', + to: '#2F80ED', + }, +}; +const loadingStatus = [ILLMStatus.Running, ILLMStatus.Pending]; +const AIImportItem = observer((props: IProps) => { + const { + data: { createTime, space, llmJob, id }, + onViewLog, + } = props; + const { intl } = useI18n(); + const { + dataImport: { downloadTaskConfig }, + moduleConfiguration, + } = useStore(); + const [rerunLoading, setRerunLoading] = React.useState(false); + const { disableConfigDownload } = moduleConfiguration.dataImport; + + const progressStatus = llmStatusMap[llmJob.status]; + + const onTaskDelete = () => { + _delete('/api/llm/import/job/' + llmJob.job_id)().then((res) => { + if (res.code === 0) { + antMsg.success(intl.get('import.deleteSuccess')); + props.onRefresh(); + } + }); + }; + + const onTaskStop = () => { + post('/api/llm/import/job/cancel')({ jobId: llmJob.job_id }).then((res) => { + if (res.code === 0) { + antMsg.success(intl.get('import.stopImportingSuccess')); + props.onRefresh(); + } + }); + }; + const handleRerun = async () => { + setRerunLoading(true); + const res = await post('/api/llm/import/job/rerun')({ jobId: llmJob.job_id }); + if (res.code === 0) { + antMsg.success(intl.get('common.rerunSuccess')); + 
props.onRefresh(); + } + setRerunLoading(false); + }; + + return ( + <> + + + + {intl.get('common.space')}: {space} + + + {llmJob.status === ILLMStatus.Pending ? ( + <> + + {intl.get('import.modifyTime')}: {dayjs(llmJob.update_time).format('YYYY-MM-DD HH:mm:ss')} + + > + ) : ( + <> + + {intl.get('common.createTime')}: {dayjs(createTime).format('YYYY-MM-DD HH:mm:ss')} + + + prompt tokens:{llmJob.process?.prompt_tokens || '-'}/ completion tokens:{' '} + {llmJob.process?.completion_tokens || '-'} + + > + )} + + + + + + + + {intl.get('llm.aiImport')} + + {llmJob.space} + {llmJob.status === ILLMStatus.Success && ( + + + {intl.get('import.importCompleted')} + + {llmJob.process?.failed_reason && ` (${llmJob.process.failed_reason})`} + + + )} + {llmJob.status === ILLMStatus.Pending && ( + {intl.get('import.importPending')} + )} + {llmJob.status === ILLMStatus.Running && ( + {intl.get('import.importRunning')} + )} + {llmJob.status === ILLMStatus.Failed && ( + + {intl.get('import.importFailed')} + {llmJob.process?.failed_reason && ( + {` (${llmJob.process.failed_reason.slice( + 0, + 20, + )}...)`} + )} + + )} + {llmJob.status === ILLMStatus.Cancel && ( + {intl.get('import.importStopped')} + )} + + + {llmJob.process && ( + + {`${getFileSize(llmJob.process.current)} / `} + {getFileSize(llmJob.process.total)}{' '} + + )} + + + `${percent.toFixed(1)}%`} + status={progressStatus as any} + percent={llmJob.process?.ratio * 100 || 0} + strokeColor={progressStatus && COLOR_MAP[progressStatus]} + /> + + + onViewLog(props.data)}> + + + + + {llmJob.status === ILLMStatus.Running && ( + + + + + + + + )} + + {!loadingStatus.includes(llmJob.status) && ( + <> + + + + + + + + + + + + + > + )} + + + + > + ); +}); + +export default AIImportItem; diff --git a/app/pages/Import/TaskList/TaskItem/LogModal/index.module.less b/app/pages/Import/TaskList/TaskItem/LogModal/index.module.less index d1b38bd9..5eefa297 100644 --- a/app/pages/Import/TaskList/TaskItem/LogModal/index.module.less +++ 
b/app/pages/Import/TaskList/TaskItem/LogModal/index.module.less @@ -1,15 +1,18 @@ @import '@app/common.less'; + .logModal { height: 100vh; + :global { .ant-modal { height: 80%; + .ant-modal-content { height: 100%; } } } - + .importModalTitle { display: flex; align-items: center; @@ -25,22 +28,25 @@ border-bottom: none; padding-right: 80px; padding-top: 15px; + .ant-modal-title { display: flex; align-items: center; justify-content: space-between; } + .ant-modal-close { top: 5px; } } + .ant-modal-body { display: flex; height: 91%; } } } - + .logContainer { width: calc(100% - 200px); height: 100%; @@ -51,23 +57,34 @@ background: #333; color: #fff; white-space: nowrap; + display: flex; + flex-direction: column; + gap: 2px; } + .full { width: 100%; } - code { + + pre { font-size: 12px; font-family: inherit; + overflow: visible; + margin: 0; } } + .logTab { height: 100%; + :global { .ant-tabs-nav { width: 200px; + .ant-tabs-tab { background-color: @lightGray; color: @darkBlue; + .ant-tabs-tab-btn { word-break: break-all; width: 100%; @@ -75,20 +92,24 @@ text-align: left; } } + .ant-tabs-tab-active { background-color: @blue; color: #fff; + .ant-tabs-tab-btn { color: #fff; } } + .ant-tabs-nav-list { background-color: @lightGray; } } - .ant-tabs-content-holder > .ant-tabs-content { + + .ant-tabs-content-holder>.ant-tabs-content { display: none } - + } } \ No newline at end of file diff --git a/app/pages/Import/TaskList/TaskItem/LogModal/index.tsx b/app/pages/Import/TaskList/TaskItem/LogModal/index.tsx index 610c34c4..4c8e09c6 100644 --- a/app/pages/Import/TaskList/TaskItem/LogModal/index.tsx +++ b/app/pages/Import/TaskList/TaskItem/LogModal/index.tsx @@ -2,29 +2,20 @@ import { Button, Modal, Tabs } from 'antd'; import { useEffect, useRef, useState } from 'react'; import Icon from '@app/components/Icon'; import { useStore } from '@app/stores'; -import { ITaskStatus } from '@app/interfaces/import'; +import { ITaskItem, ITaskStatus } from '@app/interfaces/import'; import 
classnames from 'classnames'; import { useI18n } from '@vesoft-inc/i18n'; import styles from './index.module.less'; -interface ILogDimension { - space: string; - id: string; - status: ITaskStatus; -} - interface IProps { - logDimension: ILogDimension; + task: ITaskItem; visible: boolean; onCancel: () => void; } const LogModal = (props: IProps) => { - const { - visible, - onCancel, - logDimension: { space, id, status }, - } = props; + const { visible, onCancel, task } = props; + const { id, space, status, llmJob } = task; const { dataImport: { getLogs, downloadTaskLog, getLogDetail }, moduleConfiguration, @@ -35,24 +26,30 @@ const LogModal = (props: IProps) => { const [logs, setLogs] = useState([]); const [loading, setLoading] = useState(false); const [currentLog, setCurrentLog] = useState(null); + const [logData, setLogData] = useState([]); const handleTabChange = (key: string) => { setCurrentLog(logs.filter((item) => item === key)[0]); }; const getAllLogs = async () => { - const { code, data } = await getLogs(id); - if (code === 0) { - const logs = data.names || []; - setLogs(logs); - setCurrentLog(logs[0]); + if (!llmJob) { + const { code, data } = await getLogs(id); + if (code === 0) { + const logs = data.names || []; + setLogs(logs); + setCurrentLog(logs[0]); + } + } else { + setLogs(['all.log']); + setCurrentLog('all.log'); } }; const handleLogDownload = () => currentLog && downloadTaskLog({ id, name: currentLog }); const readLog = async () => { - const data = await getLogDetail({ id }); + const data = await getLogDetail(task); handleLogData(data); }; @@ -61,6 +58,7 @@ const LogModal = (props: IProps) => { if (!logs.length) { return; } + setLogData(logs.split('\n')); /** * {"level":"info",...} * {"level":"info",...} @@ -72,11 +70,11 @@ const LogModal = (props: IProps) => { * {"level":"info",...} * {"level":"info",...} */ - logRef.current.innerHTML = logs - .split('\n') - .map((log) => `${log}`) - .join(''); - logRef.current.scrollTop = 
logRef.current.scrollHeight; + // logRef.current.innerHTML = logs + // .split('\n') + // .map((log) => `${log}`) + // .join(''); + // logRef.current.scrollTop = logRef.current.scrollHeight; }; const initLog = async () => { @@ -128,7 +126,15 @@ const LogModal = (props: IProps) => { footer={false} > - + + {logData.map((log, index) => { + return ( + + {log} + + ); + })} + ); }; diff --git a/app/pages/Import/TaskList/TaskItem/index.module.less b/app/pages/Import/TaskList/TaskItem/index.module.less index 791b9ea8..ac9fe398 100644 --- a/app/pages/Import/TaskList/TaskItem/index.module.less +++ b/app/pages/Import/TaskList/TaskItem/index.module.less @@ -1,4 +1,5 @@ @import '@app/common.less'; + .taskItem { background: #FFFFFF; box-shadow: 0px 2px 10px rgba(0, 0, 0, 0.15); @@ -6,6 +7,7 @@ min-height: 125px; margin-top: 20px; padding: 20px 30px; + .row { display: flex; justify-content: space-between; @@ -14,6 +16,7 @@ color: @darkGray; margin-bottom: 15px; align-items: center; + .createTime { font-weight: 600; font-size: 14px; @@ -21,63 +24,77 @@ font-family: Roboto-Light, serif; margin-right: 15px; } + :global(.ant-btn-link) { color: @darkGray; font-weight: 800; } } + .progress { flex: 1; margin-right: 30px; + .progressInfo { display: flex; justify-content: space-between; margin-bottom: 12px; padding-right: calc(2em + 8px); + .taskName { .draftLabel { background: #DBEFFF; border-radius: 4px; padding: 6px 12px; - font-weight: 500; font-size: 14px; color: @darkBlue; margin-right: 10px; } - font-family: Roboto-Bold, serif; + + font-family: Roboto-Bold, + serif; font-style: normal; font-size: 14px; color: @lightBlack; - & > span:not(.draftLabel) { + + &>span:not(.draftLabel) { margin-left: 12px; } + .completeInfo { color: #27AE60; - & > span { + + &>span { margin-right: 6px } + .red { color: @red } } + .errInfo { color: @red } - } + } + .moreInfo { - & > span:not(:last-child) { + &>span:not(:last-child) { margin-right: 30px; } } } + :global(.ant-progress-text) { color: 
@lightBlack; } } + .operations { :global { .ant-btn { height: 38px; + &:not(:last-child) { margin-right: 15px; } diff --git a/app/pages/Import/TaskList/TaskItem/index.tsx b/app/pages/Import/TaskList/TaskItem/index.tsx index 1155726c..8e697ac4 100644 --- a/app/pages/Import/TaskList/TaskItem/index.tsx +++ b/app/pages/Import/TaskList/TaskItem/index.tsx @@ -17,7 +17,7 @@ interface IProps { data: ITaskItem; onTaskStop: (id: string) => void; onTaskDelete: (id: string) => void; - onViewLog: (id: string, space: string, status: ITaskStatus) => void; + onViewLog: (task: ITaskItem) => void; onRerun: () => void; } @@ -236,7 +236,7 @@ const TaskItem = (props: IProps) => { {!isDraft && !loadingStatus.includes(status) && ( - onViewLog(id, space, status)}> + onViewLog(props.data)}> diff --git a/app/pages/Import/TaskList/index.module.less b/app/pages/Import/TaskList/index.module.less index 1dc274e1..0bc85c7a 100644 --- a/app/pages/Import/TaskList/index.module.less +++ b/app/pages/Import/TaskList/index.module.less @@ -1,18 +1,23 @@ @import '@app/common.less'; + .nebulaDataImport { border-top: 1px solid @gray; + .taskBtns { padding: 15px 0 20px; display: flex; - & > button:not(:last-child) { + + &>button:not(:last-child) { margin-right: 15px; } } + .header { display: flex; justify-content: space-between; align-items: center; } + .taskHeader { display: flex; justify-content: space-between; @@ -23,11 +28,13 @@ padding-bottom: 12px; border-bottom: 1px solid @gray; margin-bottom: 20px; + .label { font-size: 14px; font-style: normal; font-weight: 400; color: @darkGray; + &::after { content: ':'; margin-right: 10px; @@ -40,11 +47,13 @@ margin-top: 20px; text-align: right; } + .emptyTip { padding-top: 100px; display: flex; flex-direction: column; align-items: center; + .box { border: 1px dashed #828282; border-radius: 6px; @@ -52,6 +61,7 @@ width: 578px; margin-bottom: 22px; } + .step { width: 30px; height: 30px; @@ -65,25 +75,30 @@ justify-content: center; margin-bottom: 18px; } + .content 
{ display: flex; justify-content: space-between; align-items: baseline; } + .title { font-weight: 500; font-size: 20px; margin-bottom: 7px; } + .tip { font-weight: 300; font-size: 12px; color: #4F4F4F; } + .btn { width: 100px; height: 30px; } + .arrow { position: relative; width: 0; @@ -95,7 +110,7 @@ bottom: -55px; transform: translateX(-50%); } - + .arrow::before { content: ""; position: absolute; @@ -107,4 +122,14 @@ background-color: #D9D9D9; border-radius: 2px; } +} + +.beta { + border-radius: 3px; + background: #EB5757; + padding: 1px 5px; + border-radius: 3px; + color: #fff; + font-size: 12px; + margin-left: 10px !important; } \ No newline at end of file diff --git a/app/pages/Import/TaskList/index.tsx b/app/pages/Import/TaskList/index.tsx index edf59717..edc3159a 100644 --- a/app/pages/Import/TaskList/index.tsx +++ b/app/pages/Import/TaskList/index.tsx @@ -5,20 +5,17 @@ import { observer } from 'mobx-react-lite'; import Icon from '@app/components/Icon'; import { useStore } from '@app/stores'; import { trackPageView } from '@app/utils/stat'; -import { ITaskStatus } from '@app/interfaces/import'; +import { ITaskItem, ITaskStatus } from '@app/interfaces/import'; import { useI18n } from '@vesoft-inc/i18n'; import DatasourceConfigModal from '../DatasourceList/DatasourceConfig/PlatformConfig'; import LogModal from './TaskItem/LogModal'; import TemplateModal from './TemplateModal'; import styles from './index.module.less'; import TaskItem from './TaskItem'; +import Create from '../AIImport/Create'; +import AIImportItem, { ILLMStatus } from './TaskItem/AIImportItem'; const Option = Select.Option; -interface ILogDimension { - space: string; - id: string; - status: ITaskStatus; -} const TaskList = () => { const timer = useRef(null); @@ -30,13 +27,13 @@ const TaskList = () => { const history = useHistory(); const { taskList, getTaskList, stopTask, deleteTask } = dataImport; const { username, host } = global; - const [modalVisible, setVisible] = useState(false); + 
const [logTaskItem, setLogTaskItem] = useState(); const [importModalVisible, setImportModalVisible] = useState(false); const [sourceModalVisible, setSourceModalVisible] = useState(false); + const [aiImportModalVisible, setAiImportModalVisible] = useState(false); const [loading, setLoading] = useState(false); const { disableTemplateImport } = moduleConfiguration.dataImport; const modalKey = useMemo(() => Math.random(), [sourceModalVisible]); - const [logDimension, setLogDimension] = useState({} as ILogDimension); const getData = useCallback( (params?: Partial) => { const _filter = { ...filter, ...params }; @@ -60,13 +57,8 @@ const TaskList = () => { } }, []); - const handleLogView = useCallback((id: string, space: string, status: ITaskStatus) => { - setLogDimension({ - space, - id, - status, - }); - setVisible(true); + const handleLogView = useCallback((item: ITaskItem) => { + setLogTaskItem(item); }, []); const initList = useCallback(async () => { setLoading(true); @@ -87,17 +79,11 @@ const TaskList = () => { }; }, []); useEffect(() => { - const loadingStatus = [ITaskStatus.Processing, ITaskStatus.Pending]; + const loadingStatus = [ITaskStatus.Processing, ITaskStatus.Pending, ILLMStatus.Pending, ILLMStatus.Running]; const needRefresh = taskList.list?.filter((item) => loadingStatus.includes(item.status)).length > 0; - if (logDimension.id !== undefined && loadingStatus.includes(logDimension.status)) { - const status = taskList.list?.filter((item) => item.id === logDimension.id)[0]?.status; - if (!loadingStatus.includes(status)) { - setLogDimension({ - id: logDimension.id, - space: logDimension.space, - status, - }); - } + if (logTaskItem?.id !== undefined) { + const task = taskList.list?.find((item) => item.id === logTaskItem.id); + task && setLogTaskItem(task); } if (needRefresh && isMounted.current) { clearTimeout(timer.current); @@ -107,11 +93,6 @@ const TaskList = () => { } }, [taskList]); - useEffect(() => { - if (modalVisible === false) { - setLogDimension({} 
as ILogDimension); - } - }, [modalVisible]); const emptyTips = useMemo( () => [ { @@ -142,6 +123,13 @@ const TaskList = () => { {intl.get('import.uploadTemp')} )} + {global.appSetting.beta.functions.llmImport && ( + setAiImportModalVisible(true)}> + + {intl.get('llm.aiImport')} + beta + + )} setSourceModalVisible(true)}> @@ -192,16 +180,27 @@ const TaskList = () => { ) : ( - {taskList.list.map((item) => ( - - ))} + {taskList.list.map((item) => + !item.llmJob ? ( + + ) : ( + { + getData(); + }} + key={item.id} + data={item} + onViewLog={handleLogView} + /> + ), + )} { /> )} - {modalVisible && ( - setVisible(false)} visible={modalVisible} /> - )} + {logTaskItem && setLogTaskItem(undefined)} visible={true} />} {importModalVisible && ( setImportModalVisible(false)} @@ -229,6 +226,13 @@ const TaskList = () => { onCancel={() => setSourceModalVisible(false)} onConfirm={() => setSourceModalVisible(false)} /> + { + setAiImportModalVisible(false); + getData(); + }} + /> ); }; diff --git a/app/pages/LLMBot/chat.module.less b/app/pages/LLMBot/chat.module.less new file mode 100644 index 00000000..2310565f --- /dev/null +++ b/app/pages/LLMBot/chat.module.less @@ -0,0 +1,125 @@ +.chat { + height: calc(100vh - 300px); + width: 444px; + display: flex; + flex-direction: column; + + + .chatContent { + flex: 1; + overflow-y: auto; + + .chatContentInner { + padding: 20px; + + .chatMessage { + display: flex; + flex-direction: row; + margin-bottom: 20px; + + .chatMessageInner { + display: flex; + flex-direction: column; + max-width: 90%; + + .chatMessageContent { + padding: 10px; + font-size: 14px; + line-height: 20px; + border-radius: 5px; + letter-spacing: 1px; + box-shadow: 0 1px 2px rgba(0, 0, 0, 0.1); + + p { + margin: 5px 0; + line-height: 20px; + } + } + } + } + + .fromUser { + justify-content: flex-end; + + .chatMessageContent { + background-color: #0084ff; + color: #fff; + } + } + + .fromBot { + justify-content: flex-start; + + .chatMessageContent { + background-color: #f2f2f2; 
+ color: #333; + } + } + } + } + + .codeWrapper { + position: relative; + + :global { + .CodeMirror { + background-color: #092332; + color: #AAAAAA + } + + .CodeMirror-gutters { + background-color: #092332; + color: #AAAAAA; + border: none; + color: #828282; + } + + .CodeMirror-cursor { + border-color: #828282; + } + + .cm-keyword { + color: #19DBFF; + } + + .cm-def { + color: #AE9AFF; + } + } + + >div { + margin: 10px 0; + } + + >.copyBtn { + height: 30px; + border-radius: 6px; + border-color: #CFCFCF; + line-height: 30px; + padding: 0 15px; + background-color: transparent; + color: #0D8BFF; + + &:hover { + background-color: #0D8BFF; + color: #fff; + } + } + } + + .chatInput { + flex: 0; + position: relative; + display: flex; + + textarea { + padding-right: 80px; + } + + button { + position: absolute; + right: 10px; + bottom: 10px; + } + } +} \ No newline at end of file diff --git a/app/pages/LLMBot/chat.tsx b/app/pages/LLMBot/chat.tsx new file mode 100644 index 00000000..4e25f595 --- /dev/null +++ b/app/pages/LLMBot/chat.tsx @@ -0,0 +1,179 @@ +import { Button, Input } from 'antd'; +import styles from './chat.module.less'; +import { useEffect, useRef, useState } from 'react'; +import ws from '@app/utils/websocket'; +import { debounce } from 'lodash'; +import rootStore from '@app/stores'; +import { observer } from 'mobx-react-lite'; +import { useI18n } from '@vesoft-inc/i18n'; +import { LoadingOutlined } from '@ant-design/icons'; +import MonacoEditor from '@app/components/MonacoEditor'; + +function Chat() { + const { intl } = useI18n(); + const llm = rootStore.llm; + const [pending, setPending] = useState(false); + const contentRef = useRef(); + const [messages, setMessages] = useState([]); // [{role: 'user', content: 'hello'}, {role: 'system', content: 'hello'} + const onSend = debounce(async () => { + const { currentInput } = llm; + if (currentInput === '') return; + setPending(true); + // just use last 5 message + const beforeMessages = + rootStore.llm.mode === 
'text2cypher' ? [] : [...messages.slice(messages.length - 5, messages.length)]; + const newMessages = [ + ...messages, + { role: 'user', content: currentInput }, + { role: 'assistant', content: '', status: 'pending' }, + ]; + llm.update({ + currentInput: '', + }); + setMessages(newMessages); + const systemPrompt = await rootStore.llm.getDocPrompt(currentInput); + const sendMessages = [ + { + role: 'system', + content: 'You are a helpful NebulaGraph database assistant to help user.', + }, + // slice 100 char + ...beforeMessages.map((item) => ({ + role: item.role, + content: item.content.trim().slice(-100), + })), + { + role: 'user', + content: + (/[\u4e00-\u9fa5]/.test(currentInput) ? '请使用中文' : 'Please use English') + + 'you need use markdown to reply short and clearly. add ``` as markdown code block to write the ngql.one ngql need be one line ' + + systemPrompt, + }, + ]; + console.log(sendMessages); + ws.runChat({ + req: { + stream: true, + temperature: 0.2, + messages: sendMessages, + }, + callback: (res) => { + if (res.message.done) { + newMessages[newMessages.length - 1].status = 'done'; + setPending(false); + return; + } + try { + let text = ''; + // special for qwen api, qwen api will return a hole message + if (llm.config.apiType === 'qwen') { + text = res.message.output.choices[0].message.content || ''; + newMessages[newMessages.length - 1].content = text; + if (res.message.output.choices[0].finish_reason === 'stop') { + newMessages[newMessages.length - 1].status = 'done'; + setPending(false); + return; + } + } else { + if (res.message.choices[0].message === 'stop') { + newMessages[newMessages.length - 1].status = 'done'; + setPending(false); + return; + } + text = res.message.choices[0].delta?.content || ''; + newMessages[newMessages.length - 1].content += text; + } + setMessages([...newMessages]); + } catch (e) { + setPending(false); + } + }, + }); + }, 200); + + useEffect(() => { + if (contentRef.current) { + contentRef.current.scrollTop = 
contentRef.current.scrollHeight; + } + }, [messages]); + + function renderContent(message: { role: string; content: string; status?: string }) { + if (!message.content && message.status === 'pending') { + return ( + + + loading... + + ); + } + const gqls = message.content.split(/```([^`]+)```/); + return gqls.map((item, index) => { + if (index % 2 === 0) { + return {item}; + } else { + item = item.replace(/^(\n|ngql|gql|cypher)/g, '').replace(/\n$/g, ''); + item = item.replace(/\n\n/, '\n'); + if (message.status !== 'done') { + return {item}; + } + return ( + + + { + rootStore.console.update({ + currentGQL: item, + }); + }} + > + {intl.get('console.copy2NGQL')} + + + ); + } + }); + } + + return ( + { + e.preventDefault(); + e.stopPropagation(); + }} + > + + + {messages.map((item, index) => { + return ( + + + {renderContent(item)} + + + ); + })} + + + + { + llm.update({ + currentInput: e.target.value, + }); + }} + /> + + {intl.get('console.send')} + + + + ); +} + +export default observer(Chat); diff --git a/app/pages/LLMBot/index.module.less b/app/pages/LLMBot/index.module.less new file mode 100644 index 00000000..189a9e81 --- /dev/null +++ b/app/pages/LLMBot/index.module.less @@ -0,0 +1,64 @@ +.llmBot { + position: fixed; + bottom: 20px; + right: 20px; + font-size: 5px; + cursor: pointer; +} + +.llmBotTitle { + font-size: 16px; + + .llmBotHandler { + font-size: 14px; + color: #828282; + } +} + +@size: 60px; + +.ball { + transition: all 0.3s; + position: relative; + border-radius: 50%; + width: @size; + height: @size; + background: transparent; + backdrop-filter: blur(3px); + box-shadow: 0 1px 10px 1px rgba(0, 0, 0, 0.3); + display: flex; + align-items: center; + justify-content: center; + + svg { + font-size: 40px; + } + + .open svg { + fill: url(#llm-icon); + } +} + +:global { + #icon-studio-btn-consoleGTP { + fill: url(#llm-icon) !important; + } +} + + +@keyframes inner-roll { + 0% { + transform: rotate3d(0, 1, 1, 90deg) rotateZ(-572.4deg) + } + + 100% { + 
transform: rotate3d(1, 0, 0 90deg) rotateZ(572.4deg) + } +} + +.llmBotTitle { + display: flex; + justify-content: space-between; + align-items: center; + width: 100%; +} \ No newline at end of file diff --git a/app/pages/LLMBot/index.tsx b/app/pages/LLMBot/index.tsx new file mode 100644 index 00000000..56ba6139 --- /dev/null +++ b/app/pages/LLMBot/index.tsx @@ -0,0 +1,68 @@ +import { Popover, Switch } from 'antd'; +import styles from './index.module.less'; +import Chat from './chat'; +import Icon from '@app/components/Icon'; +import { observer } from 'mobx-react-lite'; +import rootStore, { useStore } from '@app/stores'; +// float llm bot window +function LLMBot() { + const { global, llm } = useStore(); + if (global.appSetting?.beta?.functions?.text2query.open != true) { + return null; + } + const { open } = llm; + + return ( + <> + { + llm.update({ + open: visible, + }); + }} + content={} + title={ + + AI Asistant + + text2match + { + llm.update({ + mode: checked ? 'text2cypher' : 'text2ngql', + }); + }} + checked={llm.mode == 'text2cypher'} + /> + + + } + trigger={'click'} + > + + + {!open ? 
( + + ) : ( + + )} + + + + + + + + + + + + > + ); +} + +export default observer(LLMBot); diff --git a/app/pages/MainPage/Header/HelpMenu/index.tsx b/app/pages/MainPage/Header/HelpMenu/index.tsx index 2245ec8d..7f6d5adf 100644 --- a/app/pages/MainPage/Header/HelpMenu/index.tsx +++ b/app/pages/MainPage/Header/HelpMenu/index.tsx @@ -127,6 +127,14 @@ const HelpMenu = () => { }, ], }, + { + key: 'setting', + label: ( + + + + ), + }, { key: 'user', popupClassName: styles.accountMenu, diff --git a/app/pages/MainPage/Header/index.tsx b/app/pages/MainPage/Header/index.tsx index eebbd254..fb9fec81 100644 --- a/app/pages/MainPage/Header/index.tsx +++ b/app/pages/MainPage/Header/index.tsx @@ -17,60 +17,73 @@ interface IMenuItem { category: string; action: string; label: string; - }, - icon: string, - intlKey: string + }; + icon: string; + intlKey: string; } interface IProps { - menus: IMenuItem[] + menus: IMenuItem[]; } const PageHeader = (props: IProps) => { const { menus } = props; const { intl, currentLocale } = useI18n(); const [activeKey, setActiveKey] = useState(''); - const { global: { username, host } } = useStore(); + const { + global: { username, host }, + } = useStore(); const { pathname } = useLocation(); const handleMenuClick = async ({ key }) => { setActiveKey(key); }; - const MenuItems = useMemo(() => menus.map((item) => ({ - key: item.key, - label: - - {intl.get(item.intlKey)} - - })), [currentLocale]); + const MenuItems = useMemo( + () => + menus.map((item) => ({ + key: item.key, + label: ( + + + {intl.get(item.intlKey)} + + ), + })), + [currentLocale], + ); useEffect(() => { const activeKey = pathname.split('/')[1]; setActiveKey(activeKey); }, [pathname]); - return - - - - {host && username ? <> - - - > - : } - ; + return ( + + + + + {host && username ? 
( + <> + + + > + ) : ( + + )} + + ); }; export default observer(PageHeader); diff --git a/app/pages/MainPage/index.tsx b/app/pages/MainPage/index.tsx index 2773059e..11d381ee 100644 --- a/app/pages/MainPage/index.tsx +++ b/app/pages/MainPage/index.tsx @@ -4,6 +4,7 @@ import { Redirect, Route, Switch } from 'react-router-dom'; import { shouldAlwaysShowWelcome } from '@app/pages/Welcome'; import ErrorBoundary from '@app/components/ErrorBoundary'; import { MENU_LIST, RoutesList } from './routes'; +import LLMBot from '../LLMBot'; import './index.less'; import Header from './Header'; @@ -35,6 +36,7 @@ const MainPage = () => { + ); }; diff --git a/app/pages/MainPage/routes.tsx b/app/pages/MainPage/routes.tsx index 86a1ec63..02f00cd8 100644 --- a/app/pages/MainPage/routes.tsx +++ b/app/pages/MainPage/routes.tsx @@ -1,4 +1,5 @@ import { lazy } from 'react'; +import Setting from '../Setting'; const Schema = lazy(() => import('@app/pages/Schema')); const Console = lazy(() => import('@app/pages/Console')); @@ -9,7 +10,6 @@ const TaskCreate = lazy(() => import('@app/pages/Import/TaskCreate')); const SketchModeling = lazy(() => import('@app/pages/SketchModeling')); const Welcome = lazy(() => import('@app/pages/Welcome')); - export const RoutesList = [ { path: '/schema', @@ -54,6 +54,11 @@ export const RoutesList = [ component: Welcome, exact: true, }, + { + path: '/setting', + component: Setting, + exact: true, + }, ]; export const MENU_LIST = [ @@ -63,10 +68,10 @@ export const MENU_LIST = [ track: { category: 'navigation', action: 'view_schema', - label: 'from_navigation' + label: 'from_navigation', }, icon: 'icon-studio-nav-schema', - intlKey: 'common.schema' + intlKey: 'common.schema', }, { key: 'import', @@ -74,10 +79,10 @@ export const MENU_LIST = [ track: { category: 'navigation', action: 'view_import', - label: 'from_navigation' + label: 'from_navigation', }, icon: 'icon-studio-nav-import', - intlKey: 'common.import' + intlKey: 'common.import', }, { key: 'console', @@ 
-85,10 +90,10 @@ export const MENU_LIST = [ track: { category: 'navigation', action: 'view_console', - label: 'from_navigation' + label: 'from_navigation', }, icon: 'icon-studio-nav-console', - intlKey: 'common.console' + intlKey: 'common.console', }, { key: 'sketch', @@ -96,9 +101,9 @@ export const MENU_LIST = [ track: { category: 'navigation', action: 'view_sketch', - label: 'from_navigation' + label: 'from_navigation', }, icon: 'icon-navbar-sketch', - intlKey: 'common.sketch' + intlKey: 'common.sketch', }, -]; \ No newline at end of file +]; diff --git a/app/pages/Schema/SchemaConfig/Create/IndexCreate/index.module.less b/app/pages/Schema/SchemaConfig/Create/IndexCreate/index.module.less index bd7a198d..1d2f7a3d 100644 --- a/app/pages/Schema/SchemaConfig/Create/IndexCreate/index.module.less +++ b/app/pages/Schema/SchemaConfig/Create/IndexCreate/index.module.less @@ -1,13 +1,17 @@ @import '@app/common.less'; + .indexCreatePage { padding-bottom: 100px; + .btnFieldAdd { margin-bottom: 20px; } + .viewRow { padding-top: 24px; border-top: 1px solid @gray; } + .dragItem { display: flex; align-items: center; @@ -20,9 +24,11 @@ font-size: 14px; padding: 12px 12px 12px 16px; margin-bottom: 5px; + :global(.ant-tag-close-icon) { margin-left: 10px; color: @darkBlue; + svg { width: 14px; height: 14px; @@ -36,16 +42,18 @@ .ant-modal-content .ant-modal-header { border-bottom: none; } + .ant-modal-content .ant-modal-body { text-align: center; } } - + .modalItem { display: flex; align-items: center; justify-content: center; margin-bottom: 20px; + :global(.studioIconInstruction) { svg { width: 23px; @@ -59,10 +67,12 @@ width: 330px; border: 1px solid @gray; border-radius: 3px; + :global { .ant-select-selector { border: none; } + .ant-select-arrow svg { color: @darkBlue; } @@ -78,9 +88,11 @@ .ant-modal-footer { border-top: none; text-align: center; + .ant-btn { width: 140px; } + .ant-btn-default { border: 1px solid @darkBlue; border-radius: 3px; diff --git 
a/app/pages/Setting/index.module.less b/app/pages/Setting/index.module.less new file mode 100644 index 00000000..d559bf9a --- /dev/null +++ b/app/pages/Setting/index.module.less @@ -0,0 +1,150 @@ +@import '@app/common.less'; + +.tips { + color: #828282; + font-size: 12px; +} + +.settingPage { + width: @containerWidth; + margin: 0 auto; + padding: 25px 0; + + & :global { + .ant-card { + &:not(:last-child) { + margin-bottom: 20px; + } + + .ant-card-head { + padding: 0 16px; + + .ant-card-head-title { + padding: 10px 0; + } + } + + .ant-card-body { + padding: 16px; + } + } + } +} + +.pageTitle { + font-family: Roboto-Regular; + font-weight: bold; + font-size: 18px; + padding-bottom: 12px; + border-bottom: 1px solid @gray; +} + +.pageContent { + .cardTitle { + display: flex; + align-items: center; + } + + .settingItem { + width: 980px; + margin: auto; + border-bottom: 1px solid #d5ddeb; + padding: 30px 0; + + .title { + width: 200px; + display: flex; + } + } + + .betaFunItem { + display: flex; + + &:not(:last-child) { + padding-bottom: 12px; + margin-bottom: 12px; + border-bottom: 1px solid #f5f5f5; + } + + .itemContent { + margin-left: 12px; + + .betaFunName { + margin-right: 16px; + // flex: 0 0 100px; + } + } + + .tips { + margin-top: 6px; + } + } + + .sliderWrapper { + display: flex; + align-items: center; + font-size: 14px; + + :global { + .ant-slider { + .ant-slider-rail { + height: 6px; + } + + .ant-slider-track { + height: 6px; + } + + .ant-slider-step { + height: 6px; + } + + .ant-slider-handle { + width: 16px; + height: 16px; + } + } + + .ant-input-number-input { + height: 30px; + } + } + } + + & :global { + .ant-table-thead>tr>th { + background-color: #fafafa; + } + } + + .imageSwitcher { + display: flex; + flex-direction: column; + + .imageWrapper { + width: 320px; + overflow: hidden; + background-color: #2f3a4a; + + img { + width: 100%; + height: 100%; + object-fit: cover; + } + } + + .avatarUploader { + margin-top: 12px; + + .uploadBtn { + text-align: 
center; + padding: 2px 12px; + height: 32px; + } + + .tips { + margin-left: 12px; + } + } + } +} \ No newline at end of file diff --git a/app/pages/Setting/index.tsx b/app/pages/Setting/index.tsx new file mode 100644 index 00000000..c645aac0 --- /dev/null +++ b/app/pages/Setting/index.tsx @@ -0,0 +1,151 @@ +import { useCallback, useEffect, useState } from 'react'; +import { observer } from 'mobx-react-lite'; +import { Button, Col, Form, Input, InputNumber, Row, Select, Switch, message } from 'antd'; +import { useI18n } from '@vesoft-inc/i18n'; +import { useStore } from '@app/stores'; +// import LanguageSelect from '@app/components/LanguageSelect'; +import { trackEvent } from '@app/utils/stat'; +import styles from './index.module.less'; +import LanguageSelect from '../Login/LanguageSelect'; +import { useForm } from 'antd/lib/form/Form'; +import { post } from '@app/utils/http'; + +const Setting = observer(() => { + const { intl } = useI18n(); + const { global, llm } = useStore(); + const { appSetting, saveAppSetting } = global; + const [form] = useForm(); + const [apiType, setApiType] = useState('openai'); + useEffect(() => { + initForm(); + }, []); + + const initForm = async () => { + await llm.fetchConfig(); + form.setFieldsValue(llm.config); + setApiType(llm.config.apiType); + }; + + const updateAppSetting = useCallback(async (param: Partial) => { + // make it loading for a while, so it looks more smooth + saveAppSetting({ beta: { ...global?.appSetting?.beta, ...param } }); + trackEvent('setting', 'update_app_setting'); + }, []); + + const onSubmitLLMForm = useCallback(() => { + form.validateFields().then((values) => { + const { apiType, url, key, maxContextLength, ...config } = values; + post('/api/config/llm')({ + apiType, + url, + key, + maxContextLength, + config: JSON.stringify(config), + }).then((res) => { + if (res.code === 0) { + message.success(intl.get('common.success')); + llm.setConfig(values); + } + }); + }); + }, [form]); + + const { open = true, 
functions } = appSetting?.beta || {}; + + return ( + + {intl.get('setting.globalSetting')} + + + + {intl.get('common.language')} + + + + + + + + {intl.get('setting.betaFunction')} + + updateAppSetting({ open: checked })} + /> + + + + + updateAppSetting({ functions: { ...functions, viewSchema: { open } } })} + /> + + {intl.get('common.viewSchema')} + {intl.get('setting.viewScmemaBetaFunDesc')} + + + + updateAppSetting({ functions: { ...functions, text2query: { open } } })} + /> + + {intl.get('setting.text2query')} + {intl.get('setting.text2queryDesc')} + + + + updateAppSetting({ functions: { ...functions, llmImport: { open } } })} + /> + + {intl.get('setting.llmImport')} + {intl.get('setting.llmImportDesc')} + + + { + setApiType(value); + }} + defaultValue="openai" + style={{ width: 120 }} + > + OpenAI + Aliyun + + + + + + + + + {apiType === 'qwen' && ( + + + + )} + + + + + + {intl.get('setting.verify')} + + + + + + + + + + ); +}); +export default Setting; diff --git a/app/stores/global.ts b/app/stores/global.ts index cb7bfaff..a5df3317 100644 --- a/app/stores/global.ts +++ b/app/stores/global.ts @@ -4,15 +4,18 @@ import { Base64 } from 'js-base64'; import { BrowserHistory } from 'history'; import service from '@app/config/service'; import ngqlRunner from '@app/utils/websocket'; -import { isValidIP } from '@app/utils/function'; +import { isValidIP, safeParse } from '@app/utils/function'; import { getRootStore, resetStore } from '.'; export class GlobalStore { gConfig = window.gConfig; appSetting = { beta: { + open: true, functions: { viewSchema: { open: true }, + text2query: { open: true }, + llmImport: { open: true }, }, }, }; @@ -28,11 +31,22 @@ export class GlobalStore { _username: observable, _host: observable, ngqlRunner: observable.ref, + appSetting: observable.ref, update: action, + saveAppSetting: action, }); this.ngqlRunner.logoutFun = this.logout; + const cacheAppSetting = localStorage.getItem('appSetting'); + if (cacheAppSetting) { + this.appSetting = 
safeParse(cacheAppSetting); + } } + saveAppSetting = (payload: any) => { + this.appSetting = payload; + localStorage.setItem('appSetting', JSON.stringify(payload)); + }; + get rootStore() { return getRootStore(); } @@ -44,6 +58,8 @@ export class GlobalStore { return this._host || cookies.get('nh'); } + useLocalObservable; + resetModel = () => { this.update({ _username: '', diff --git a/app/stores/import.ts b/app/stores/import.ts index 3a3c802d..14f092a1 100644 --- a/app/stores/import.ts +++ b/app/stores/import.ts @@ -341,7 +341,7 @@ export class ImportStore { trackEvent('import', 'download_task_log'); }; - getLogDetail = async (params: { id: string }) => { + getLogDetail = async (params: ITaskItem) => { const { code, data } = await service.getLogDetail(params); if (code === 0) { return data; diff --git a/app/stores/index.ts b/app/stores/index.ts index 831c7af2..d39a0433 100644 --- a/app/stores/index.ts +++ b/app/stores/index.ts @@ -9,6 +9,7 @@ import sketchModel from './sketchModel'; import welcome from './welcome'; import datasource from './datasource'; import moduleConfiguration from './moduleConfiguration'; +import llm from './llm'; const rootStore = { global, @@ -21,6 +22,7 @@ const rootStore = { welcome, datasource, moduleConfiguration, + llm, }; const rootStoreRef = { current: rootStore }; // @ts-ignore diff --git a/app/stores/llm.ts b/app/stores/llm.ts new file mode 100644 index 00000000..93fa73e2 --- /dev/null +++ b/app/stores/llm.ts @@ -0,0 +1,292 @@ +import { makeAutoObservable } from 'mobx'; +import schema from './schema'; +import { get } from '@app/utils/http'; +import rootStore from '.'; +import ws from '@app/utils/websocket'; +import { safeParse } from '@app/utils/function'; +import * as ngqlDoc from '@app/utils/ngql'; + +export const matchPrompt = `Use NebulaGraph match knowledge to help me answer question. +Use only the provided relationship types and properties in the schema. 
+Do not use any other relationship types or properties that are not provided. +Schema: +--- +{schema} +--- +Note: NebulaGraph speaks a dialect of Cypher, comparing to standard Cypher: +1. it uses double equals sign for comparison: == rather than = +2. it needs explicit label specification when referring to node properties, i.e. +v is a variable of a node, and we know its label is Foo, v.foo.name is correct +while v.name is not. +For example, see this diff between standard and NebulaGraph Cypher dialect: +diff +< MATCH (p:person)-[:directed]->(m:movie) WHERE m.name = 'The Godfather' +< RETURN p.name; +--- +> MATCH (p:person)-[:directed]->(m:movie) WHERE m.movie.name == 'The Godfather' +> RETURN p.person.name; +Question:{query_str} +`; +export const llmImportPrompt = `You are knowledge graph Expert. +please extract relationship data from the text below, referring to the schema of the graph, and return the results in the following JSON format without interpreting, don't explain just return the results directly. +{ + "nodes":[{ "name":"","type":"","props":{} }], + "edges":[{ "src":"","dst":"","edgeType":"","props":{} }] +} +The schema of the graph is: {spaceSchema} +The text is: {text} +The result is: +`; +export const llmImportTask = `please excute the task below,and return the result,dont' explain,just return the result directly. 
+{ + "task": "extract relationships", + "instructions": { + "text": "{text}", + "graphSchema": "{spaceSchema}", + "format": { + "nodes": [{ + "name": "", + "type": "", + "props": {} + }], + "edges": [{ + "src": "", + "dst": "", + "type": "", + "props": "{props}" + }] + } + } +} +reuslt:`; + +export interface LLMConfig { + url: string; + apiType: string; + llmVersion: string; + key: string; + features: string[]; + maxContextLength: number; + enableCopilot: boolean; + enableLLM2NGQLs: boolean; + gqlPath: string; +} +class LLM { + currentInput = ''; + open = false; + config = { + maxContextLength: 2000, + url: 'https://{your-resource-name}.openai.azure.com/openai/deployments/{deployment-id}/chat/completions?api-version={api-version}', + apiType: 'openai', + features: ['spaceSchema', 'useConsoleNGQL'], + } as LLMConfig; + widget: HTMLSpanElement; + editor: any; + mode = 'text2ngql' as 'text2ngql' | 'text2cypher'; + completionList: { text: string; type: string }[] = []; + constructor() { + makeAutoObservable(this, { + editor: false, + widget: false, + }); + this.fetchConfig(); + } + + fetchConfig() { + return get('/api/config/llm')().then((res) => { + if (res.code != 0 || !res.data) return; + const { config, ...values } = res.data.config; + const configMap = config ? 
safeParse(config) : {}; + this.setConfig({ + ...configMap, + ...values, + gqlPath: res.data.gqlPath, + }); + return this.config; + }); + } + + setConfig(payload: LLMConfig) { + this.config = { ...this.config, ...payload }; + } + + update(payload: any) { + Object.assign(this, payload); + } + + async getSpaceSchema(space: string) { + let finalPrompt: any = { + spaceName: space, + }; + if (this.config.features.includes('spaceSchema')) { + await schema.switchSpace(space); + await schema.getTagList(); + await schema.getEdgeList(); + const tagList = schema.tagList; + const edgeList = schema.edgeList; + finalPrompt = { + ...finalPrompt, + vidType: schema.spaceVidType, + nodeTypes: tagList.map((item) => { + return { + type: item.name, + props: item.fields.map((item) => { + return { + name: item.Field, + dataType: item.Type, + nullable: (item as any).Null === 'YES', + }; + }), + }; + }), + edgeTypes: edgeList.map((item) => { + return { + type: item.name, + props: item.fields.map((item) => { + return { + name: item.Field, + dataType: item.Type, + nullable: (item as any).Null === 'YES', + }; + }), + }; + }), + }; + } + return JSON.stringify(finalPrompt); + } + + async getDocPrompt(text: string) { + let prompt = matchPrompt; // default use text2cypher + if (this.mode !== 'text2cypher') { + text = text.replaceAll('"', "'"); + const res = (await ws.runChat({ + req: { + temperature: 0.5, + stream: false, + max_tokens: 20, + messages: [ + { + role: 'user', + content: `From the following graph database book categories: "${ngqlDoc.NGQLCategoryString}" find top two useful categories to solve the question:"${text}",don't explain,just return the two combined categories, separated by ',' is:`, + }, + ], + }, + })) as any; + if (res.code === 0) { + const url = res.message.choices[0].message?.content as string; + const paths = url + .toLowerCase() + .replaceAll(/\s|"|\\/g, '') + .split(','); + console.log('select doc url:', paths); + if (ngqlDoc.ngqlMap[paths[0]]) { + let doc = 
ngqlDoc.ngqlMap[paths[0]].content; + if (!doc) { + doc = ''; + } + const doc2 = ngqlDoc.ngqlMap[paths[1]].content; + if (doc2) { + doc += doc2; + } + doc = doc.replaceAll(/\n\n+/g, ''); + if (doc.length) { + console.log('docString:', doc); + prompt = `learn the below doc, and use it to help user ,the user space schema is "{schema}" the doc is: \n${doc.slice( + 0, + this.config.maxContextLength, + )} the question is "{query_str}"`; + } + } + } + } + prompt = prompt.replace('{query_str}', text); + const pathname = window.location.pathname; + const space = pathname.indexOf('schema') > -1 ? rootStore.schema.currentSpace : rootStore.console.currentSpace; + if (!space) { + return prompt.replace('{schema}', 'no space selected'); + } + let schemaPrompt = await this.getSpaceSchema(space); + + if (this.config.features.includes('useConsoleNGQL')) { + schemaPrompt += `\nuser console ngql context: ${rootStore.console.currentGQL}`; + } + prompt = prompt.replace('{schema}', schemaPrompt); + return prompt; + } + + timer; + running = false; + async checkCopilotList(cm: any) { + clearTimeout(this.timer); + this.timer = setTimeout(async () => { + let snippet = ''; + const cursor = cm.getCursor(); + const line = cm.getLine(cursor.line).split(';').pop(); + if (cursor.ch < line.length - 1) return; + if (line.length < 3) return; + const tokens = line.split(' '); + const firstToken = tokens.find((item) => item.replaceAll(' ', '').length > 0); + const hits = ngqlDoc.ngqlDoc.filter((each) => each.title.toLowerCase().indexOf(firstToken.toLowerCase()) === 0); + let doc = ''; + if (this.mode == 'text2cypher' && firstToken.toLowerCase() == 'match') { + doc += matchPrompt; + } else { + if (hits.length) { + hits.find((item) => { + if (doc.length > this.config.maxContextLength) return true; + doc += item + '\n'; + }); + } + } + if (!doc) { + return; + } + this.running = true; + cm.closeHint(); + const schema = await this.getSpaceSchema(rootStore.console.currentSpace); + const res = (await 
ws.runChat({ + req: { + temperature: 1.0, + stream: false, + presence_penalty: 0.6, + max_tokens: 30, + messages: [ + { + role: 'user', + content: `As a NebulaGraph NGQL code autocomplete copilot, you have access to the following information: document "${doc}" and user space schema "${schema}". + Use this information to guess the user's next NGQL code autocomplete as accurately as possible. + Please provide your guess as a response without any prefix words. + Don't explain anything. + the next autocomplete text can combine with the given text. + if you can't guess, say "Sorry", + The user's NGQL text is: ${line} + the most possible 2 next autocomplete text is:`, + }, + ], + }, + })) as any; + if (res.code === 0) { + snippet = res.message.choices[0].message?.content; + console.log(snippet); + if (snippet.indexOf('Sorry') > -1) { + snippet = ''; + } + } + if (snippet) { + this.update({ + completionList: snippet + .split('\n') + .map((each) => ({ + type: 'copilot', + text: each, + })) + .filter((item) => item.text !== ''), + }); + } + this.running = false; + }, 3000); + } +} + +export default new LLM(); diff --git a/app/stores/schema.ts b/app/stores/schema.ts index 76357198..43f8802e 100644 --- a/app/stores/schema.ts +++ b/app/stores/schema.ts @@ -270,7 +270,10 @@ export class SchemaStore { }; const { code, data } = await this.getTagOrEdgeInfo(ISchemaEnum.Edge, item, space); if (code === 0) { - edge.fields = data.tables; + edge.fields = data.tables.map((item) => ({ + Field: item.Field, + Type: item.Type, + })); } edgeList.push(edge); }), diff --git a/app/utils/file.ts b/app/utils/file.ts index 27fe56ac..a208cdd6 100644 --- a/app/utils/file.ts +++ b/app/utils/file.ts @@ -10,11 +10,14 @@ export function readFileContent(file) { export function getFileSize(size: number) { const units = ['B', 'KB', 'MB', 'GB', 'TB'] as const; const gap = 1 << 10; + if (!size) { + return '0 B'; + } for (let i = 0, byte = gap; i < units.length; i++, byte *= gap) { if (size < byte || i === 
units.length - 1) { const unitSize = ((size * gap) / byte).toFixed(2); - return `${unitSize} ${units[i]}` as `${number} ${typeof units[number]}`; + return `${unitSize} ${units[i]}` as `${number} ${(typeof units)[number]}`; } } } diff --git a/app/utils/ngql.json b/app/utils/ngql.json new file mode 100644 index 00000000..99ae15e4 --- /dev/null +++ b/app/utils/ngql.json @@ -0,0 +1 @@ +[{"title":"Welcome to NebulaGraph 3.6.0 Documentation","content":"NebulaGraph is a distributed, scalable, and lightning-fast graph database. It is the optimal solution in the world capable of hosting graphs with dozens of billions of vertices (nodes) and trillions of edges (relationships) with millisecond latency.\nGetting started\nQuick start\nPreparations before deployment\nnGQL cheatsheet\nFAQ\nEcosystem Tools\nLive Demo\nRelease notes\nNebulaGraph Community Edition 3.6.0\nNebulaGraph Dashboard Community\nNebulaGraph Studio\nOther Sources\nTo cite NebulaGraph\nForum\nNebulaGraph Homepage\nBlogs\nVideos\nChinese Docs\nSymbols used in this manual\nModify errors\nThis NebulaGraph manual is written in the Markdown language. Users can click the pencil sign on the upper right side of each document title and modify errors.","url":"https://docs.nebula-graph.io/3.6.0/.","type":"doc"},{"title":"What is NebulaGraph","content":"NebulaGraph is an open-source, distributed, easily scalable, and native graph database. It is capable of hosting graphs with hundreds of billions of vertices and trillions of edges, and serving queries with millisecond-latency. \nWhat is a graph database\nA graph database, such as NebulaGraph, is a database that specializes in storing vast graph networks and retrieving information from them. It efficiently stores data as vertices (nodes) and edges (relationships) in labeled property graphs. Properties can be attached to both vertices and edges. 
Each vertex can have one or multiple tags (labels).\nGraph databases are well suited for storing most kinds of data models abstracted from reality. Things are connected in almost all fields in the world. Modeling systems like relational databases extract the relationships between entities and squeeze them into table columns alone, with their types and properties stored in other columns or even other tables. This makes data management time-consuming and cost-ineffective.\nNebulaGraph, as a typical native graph database, allows you to store the rich relationships as edges with edge types and properties directly attached to them.\nAdvantages of NebulaGraph\nOpen source\nNebulaGraph is open under the Apache 2.0 License. More and more people such as database developers, data scientists, security experts, and algorithm engineers are participating in the designing and development of NebulaGraph. To join the opening of source code and ideas, surf the NebulaGraph GitHub page.\nOutstanding performance\nWritten in C++ and born for graphs, NebulaGraph handles graph queries in milliseconds. Among most databases, NebulaGraph shows superior performance in providing graph data services. The larger the data size, the greater the superiority of NebulaGraph.For more information, see NebulaGraph benchmarking.\nHigh scalability\nNebulaGraph is designed in a shared-nothing architecture and supports scaling in and out without interrupting the database service.\nDeveloper friendly\nNebulaGraph supports clients in popular programming languages like Java, Python, C++, and Go, and more are under development. For more information, see NebulaGraph clients.\nReliable access control\nNebulaGraph supports strict role-based access control and external authentication servers such as LDAP (Lightweight Directory Access Protocol) servers to enhance data security. 
For more information, see Authentication and authorization.\nDiversified ecosystem\nMore and more native tools of NebulaGraph have been released, such as NebulaGraph Studio, NebulaGraph Console, and NebulaGraph Exchange. For more ecosystem tools, see Ecosystem tools overview.\nBesides, NebulaGraph has the ability to be integrated with many cutting-edge technologies, such as Spark, Flink, and HBase, for the purpose of mutual strengthening in a world of increasing challenges and chances.\nOpenCypher-compatible query language\nThe native NebulaGraph Query Language, also known as nGQL, is a declarative, openCypher-compatible textual query language. It is easy to understand and easy to use. For more information, see nGQL guide.\nFuture-oriented hardware with balanced reading and writing\nSolid-state drives have extremely high performance and they are getting cheaper. NebulaGraph is a product based on SSD. Compared with products based on HDD and large memory, it is more suitable for future hardware trends and easier to achieve balanced reading and writing.\nEasy data modeling and high flexibility\nYou can easily model the connected data into NebulaGraph for your business without forcing them into a structure such as a relational table, and properties can be added, updated, and deleted freely. For more information, see Data modeling.\nHigh popularity\nNebulaGraph is being used by tech leaders such as Tencent, Vivo, Meituan, and JD Digits. For more information, visit the NebulaGraph official website.\nUse cases\nNebulaGraph can be used to support various graph-based scenarios. To spare the time spent on pushing the kinds of data mentioned in this section into relational databases and on bothering with join queries, use NebulaGraph.\nFraud detection\nFinancial institutions have to traverse countless transactions to piece together potential crimes and understand how combinations of transactions and devices might be related to a single fraud scheme. 
This kind of scenario can be modeled in graphs, and with the help of NebulaGraph, fraud rings and other sophisticated scams can be easily detected.\nReal-time recommendation\nNebulaGraph offers the ability to instantly process the real-time information produced by a visitor and make accurate recommendations on articles, videos, products, and services.\nIntelligent question-answer system\nNatural languages can be transformed into knowledge graphs and stored in NebulaGraph. A question organized in a natural language can be resolved by a semantic parser in an intelligent question-answer system and re-organized. Then, possible answers to the question can be retrieved from the knowledge graph and provided to the one who asked the question.\nSocial networking\nInformation on people and their relationships is typical graph data. NebulaGraph can easily handle the social networking information of billions of people and trillions of relationships, and provide lightning-fast queries for friend recommendations and job promotions in the case of massive concurrency.\nRelated links\nOfficial website\nDocs\nBlogs\nForum\nGitHub","url":"https://docs.nebula-graph.io/3.6.0/1.introduction/1.what-is-nebula-graph/","type":"doc"},{"title":"Data modeling","content":"A data model is a model that organizes data and specifies how they are related to one another. This topic describes the Nebula Graph data model and provides suggestions for data modeling with NebulaGraph.\nData structures\nNebulaGraph data model uses six data structures to store data. They are graph spaces, vertices, edges, tags, edge types and properties.\nGraph spaces: Graph spaces are used to isolate data from different teams or programs. Data stored in different graph spaces are securely isolated. Storage replications, privileges, and partitions can be assigned.\nVertices: Vertices are used to store entities.\nIn NebulaGraph, vertices are identified with vertex identifiers (i.e. VID). 
The VID must be unique in the same graph space. VID should be int64, or fixed_string(N).\nA vertex has zero to multiple tags.\nEdges: Edges are used to connect vertices. An edge is a connection or behavior between two vertices.\nThere can be multiple edges between two vertices.\nEdges are directed. -> identifies the directions of edges. Edges can be traversed in either direction.\nAn edge is identified uniquely with <edge type, edge rank, src vid, and dst vid>. Edges have no EID.\nAn edge must have one and only one edge type.\nThe rank value is an immutable user-assigned 64-bit signed integer. It identifies the edges with the same edge type between two vertices. Edges are sorted by their rank values. The edge with the greatest rank value is listed first. The default rank value is zero.\nTags: Tags are used to categorize vertices. Vertices that have the same tag share the same definition of properties.\nEdge types: Edge types are used to categorize edges. Edges that have the same edge type share the same definition of properties.\nProperties: Properties are key-value pairs. Both vertices and edges are containers for properties.\nDirected property graph\nNebulaGraph stores data in directed property graphs. A directed property graph has a set of vertices connected by directed edges. Both vertices and edges can have properties. A directed property graph is represented as:\nG = < V, E, PV, PE >\nV is a set of vertices.\nE is a set of directed edges.\nPV is the property of vertices.\nPE is the property of edges.\nThe following table is an example of the structure of the basketball player dataset. 
We have two types of vertices, that is player and team, and two types of edges, that is serve and follow.\nElement\nName\nProperty name (Data type)\nDescription\nTag\nplayer\nname (string) age (int)\nRepresents players in the team.\nTag\nteam\nname (string)\nRepresents the teams.\nEdge type\nserve\nstart_year (int) end_year (int)\nRepresents actions taken by players in the team.An action links a player with a team, and the direction is from a player to a team.\nEdge type\nfollow\ndegree (int)\nRepresents actions taken by players in the team.An action links a player with another player, and the direction is from one player to the other player.","url":"https://docs.nebula-graph.io/3.6.0/1.introduction/2.data-model/","type":"doc"},{"title":"Path types","content":"In graph theory, a path in a graph is a finite or infinite sequence of edges which joins a sequence of vertices. Paths are fundamental concepts of graph theory.\nPaths can be categorized into 3 types: walk, trail, and path. For more information, see Wikipedia.\nThe following figure is an example for a brief introduction.\nWalk\nA walk is a finite or infinite sequence of edges. Both vertices and edges can be repeatedly visited in graph traversal.\nIn the above figure C, D, and E form a cycle. So, this figure contains infinite paths, such as A->B->C->D->E, A->B->C->D->E->C, and A->B->C->D->E->C->D.\nTrail\nA trail is a finite sequence of edges. Only vertices can be repeatedly visited in graph traversal. The Seven Bridges of Königsberg is a typical trail.\nIn the above figure, edges cannot be repeatedly visited. So, this figure contains finite paths. The longest path in this figure consists of 5 edges: A->B->C->D->E->C.\nThere are two special cases of trail, cycle and circuit. The following figure is an example for a brief introduction.\ncycle\nA cycle refers to a closed trail. Only the terminal vertices can be repeatedly visited. 
The longest path in this figure consists of 3 edges: A->B->C->A or C->D->E->C.\ncircuit\nA circuit refers to a closed trail. Edges cannot be repeatedly visited in graph traversal. Apart from the terminal vertices, other vertices can also be repeatedly visited. The longest path in this figure: A->B->C->D->E->C->A.\nPath\nA path is a finite sequence of edges. Neither vertices nor edges can be repeatedly visited in graph traversal.\nSo, the above figure contains finite paths. The longest path in this figure consists of 4 edges: A->B->C->D->E.","url":"https://docs.nebula-graph.io/3.6.0/1.introduction/2.1.path/","type":"doc"},{"title":"VID","content":"In a graph space, a vertex is uniquely identified by its ID, which is called a VID or a Vertex ID.\nFeatures\nThe data types of VIDs are restricted to FIXED_STRING() or INT64. One graph space can only select one VID type.\nA VID in a graph space is unique. It functions just as a primary key in a relational database. VIDs in different graph spaces are independent.\nThe VID generation method must be set by users, because NebulaGraph does not provide auto increasing ID, or UUID.\nVertices with the same VID will be identified as the same one. For example:\nA VID is the unique identifier of an entity, like a person's ID card number. A tag means the type of an entity, such as driver, and boss. Different tags define two groups of different properties, such as driving license number, driving age, order amount, order taking alt, and job number, payroll, debt ceiling, business phone number.\nWhen two INSERT statements (neither uses a parameter of IF NOT EXISTS) with the same VID and tag are operated at the same time, the latter INSERT will overwrite the former.\nWhen two INSERT statements with the same VID but different tags, like TAG A and TAG B, are operated at the same time, the operation of Tag A will not affect Tag B.\nVIDs will usually be indexed and stored into memory (in the way of LSM-tree). 
Thus, direct access to VIDs enjoys peak performance.\nVID Operation\nNebulaGraph 1.x only supports INT64 while NebulaGraph 2.x supports INT64 and FIXED_STRING(). In CREATE SPACE, VID types can be set via vid_type.\nid() function can be used to specify or locate a VID.\nLOOKUP or MATCH statements can be used to find a VID via property index.\nDirect access to vertices statements via VIDs enjoys peak performance, such as DELETE xxx WHERE id(xxx) == \"player100\" or GO FROM \"player100\". Finding VIDs via properties and then operating the graph will cause poor performance, such as LOOKUP | GO FROM $-.ids, which will run both LOOKUP and | one more time.\nVID Generation\nVIDs can be generated via applications. Here are some tips:\n(Optimal) Directly take a unique primary key or property as a VID. Property access depends on the VID.\nGenerate a VID via a unique combination of properties. Property access depends on property index.\nGenerate a VID via algorithms like snowflake. Property access depends on property index.\nIf short primary keys greatly outnumber long primary keys, do not enlarge the N of FIXED_STRING() too much. Otherwise, it will occupy a lot of memory and hard disks, and slow down performance. Generate VIDs via BASE64, MD5, hash by encoding and splicing.\nIf you generate int64 VID via hash, the probability of collision is about 1/10 when there are 1 billion vertices. The number of edges has no concern with the probability of collision.\nDefine and modify a VID and its data type\nThe data type of a VID must be defined when you create the graph space. Once defined, it cannot be modified.\nA VID is set when you insert a vertex and cannot be modified. 
\nQuery start vid and global scan\nIn most cases, the execution plan of query statements in NebulaGraph (MATCH, GO, and LOOKUP) must query the start vid in a certain way.\nThere are only two ways to locate start vid:\nFor example, GO FROM \"player100\" OVER explicitly indicates in the statement that start vid is \"player100\".\nFor example, LOOKUP ON player WHERE player.name == \"Tony Parker\" or MATCH (v:player {name:\"Tony Parker\"}) locates start vid by the index of the property player.name.","url":"https://docs.nebula-graph.io/3.6.0/1.introduction/3.vid/","type":"doc"},{"title":"Architecture overview","content":"NebulaGraph consists of three services: the Graph Service, the Storage Service, and the Meta Service. It applies the separation of storage and computing architecture.\nEach service has its executable binaries and processes launched from the binaries. Users can deploy a NebulaGraph cluster on a single machine or multiple machines using these binaries.\nThe following figure shows the architecture of a typical NebulaGraph cluster.\nThe Meta Service\nThe Meta Service in the NebulaGraph architecture is run by the nebula-metad processes. It is responsible for metadata management, such as schema operations, cluster administration, and user privilege management.\nFor details on the Meta Service, see Meta Service.\nThe Graph Service and the Storage Service\nNebulaGraph applies the separation of storage and computing architecture. The Graph Service is responsible for querying. The Storage Service is responsible for storage. They are run by different processes, i.e., nebula-graphd and nebula-storaged. The benefits of the separation of storage and computing architecture are as follows:\nGreat scalabilityThe separated structure makes both the Graph Service and the Storage Service flexible and easy to scale in or out.\nHigh availabilityIf part of the Graph Service fails, the data stored by the Storage Service suffers no loss. 
And if the rest part of the Graph Service is still able to serve the clients, service recovery can be performed quickly, even unfelt by the users.\nCost-effectiveThe separation of storage and computing architecture provides a higher resource utilization rate, and it enables clients to manage the cost flexibly according to business demands.\nOpen to more possibilitiesWith the ability to run separately, the Graph Service may work with multiple types of storage engines, and the Storage Service may also serve more types of computing engines.\nFor details on the Graph Service and the Storage Service, see Graph Service and Storage Service.","url":"https://docs.nebula-graph.io/3.6.0/1.introduction/3.nebula-graph-architecture/1.architecture-overview/","type":"doc"},{"title":"Meta Service","content":"This topic introduces the architecture and functions of the Meta Service.\nThe architecture of the Meta Service\nThe architecture of the Meta Service is as follows:\nThe Meta Service is run by nebula-metad processes. Users can deploy nebula-metad processes according to the scenario:\nIn a test environment, users can deploy one or three nebula-metad processes on different machines or a single machine.\nIn a production environment, we recommend that users deploy three nebula-metad processes on different machines for high availability.\nAll the nebula-metad processes form a Raft-based cluster, with one process as the leader and the others as the followers.\nThe leader is elected by the majorities and only the leader can provide service to the clients or other components of NebulaGraph. The followers will be run in a standby way and each has a data replication of the leader. Once the leader fails, one of the followers will be elected as the new leader.\nFunctions of the Meta Service\nManages user accounts\nThe Meta Service stores the information of user accounts and the privileges granted to the accounts. 
When the clients send queries to the Meta Service through an account, the Meta Service checks the account information and whether the account has the right privileges to execute the queries or not.\nFor more information on NebulaGraph access control, see Authentication.\nManages partitions\nThe Meta Service stores and manages the locations of the storage partitions and helps balance the partitions.\nManages graph spaces\nNebulaGraph supports multiple graph spaces. Data stored in different graph spaces are securely isolated. The Meta Service stores the metadata of all graph spaces and tracks the changes of them, such as adding or dropping a graph space.\nManages schema information\nNebulaGraph is a strong-typed graph database. Its schema contains tags (i.e., the vertex types), edge types, tag properties, and edge type properties.\nThe Meta Service stores the schema information. Besides, it performs the addition, modification, and deletion of the schema, and logs the versions of them.\nFor more information on NebulaGraph schema, see Data model.\nManages TTL information\nThe Meta Service stores the definition of TTL (Time to Live) options which are used to control data expiration. The Storage Service takes care of the expiring and evicting processes. For more information, see TTL.\nManages jobs\nThe Job Management module in the Meta Service is responsible for the creation, queuing, querying, and deletion of jobs.","url":"https://docs.nebula-graph.io/3.6.0/1.introduction/3.nebula-graph-architecture/2.meta-service/","type":"doc"},{"title":"Graph Service","content":"The Graph Service is used to process the query. It has four submodules: Parser, Validator, Planner, and Executor. 
This topic will describe the Graph Service accordingly.\nThe architecture of the Graph Service\nAfter a query is sent to the Graph Service, it will be processed by the following four submodules:\nParser: Performs lexical analysis and syntax analysis.\nValidator: Validates the statements.\nPlanner: Generates and optimizes the execution plans.\nExecutor: Executes the plans with operators.\nParser\nAfter receiving a request, the statements will be parsed by Parser composed of Flex (lexical analysis tool) and Bison (syntax analysis tool), and its corresponding AST will be generated. Statements will be directly intercepted in this stage because of their invalid syntax.\nFor example, the structure of the AST of GO FROM \"Tim\" OVER like WHERE properties(edge).likeness > 8.0 YIELD dst(edge) is shown in the following figure.\nValidator\nValidator performs a series of validations on the AST. It mainly works on these tasks:\nValidating metadataValidator will validate whether the metadata is correct or not.\nWhen parsing the OVER, WHERE, and YIELD clauses, Validator looks up the Schema and verifies whether the edge type and tag data exist or not. 
For an INSERT statement, Validator verifies whether the types of the inserted data are the same as the ones defined in the Schema.\nValidating contextual referenceValidator will verify whether the cited variable exists or not, or whether the cited property is variable or not.\nFor composite statements, like $var = GO FROM \"Tim\" OVER like YIELD dst(edge) AS ID; GO FROM $var.ID OVER serve YIELD dst(edge), Validator verifies first to see if var is defined, and then to check if the ID property is attached to the var variable.\nValidating type inferenceValidator infers what type the result of an expression is and verifies the type against the specified clause.\nFor example, the WHERE clause requires the result to be a bool value, a NULL value, or empty.\nValidating the information of *Validator needs to verify all the Schema that involves * when verifying the clause if there is a * in the statement.\nTake a statement like GO FROM \"Tim\" OVER * YIELD dst(edge), properties(edge).likeness, dst(edge) as an example. When verifying the OVER clause, Validator needs to verify all the edge types. If the edge type includes like and serve, the statement would be GO FROM \"Tim\" OVER like,serve YIELD dst(edge), properties(edge).likeness, dst(edge).\nValidating input and outputValidator will check the consistency of the clauses before and after the |.\nIn the statement GO FROM \"Tim\" OVER like YIELD dst(edge) AS ID | GO FROM $-.ID OVER serve YIELD dst(edge), Validator will verify whether $-.ID is defined in the clause before the |.\nWhen the validation succeeds, an execution plan will be generated. Its data structure will be stored in the src/planner directory.\nPlanner\nIn the nebula-graphd.conf file, when enable_optimizer is set to be false, Planner will not optimize the execution plans generated by Validator. 
It will be executed by Executor directly.\nIn the nebula-graphd.conf file, when enable_optimizer is set to be true, Planner will optimize the execution plans generated by Validator. The structure is as follows.\nBefore optimizationIn the execution plan on the right side of the preceding figure, each node directly depends on other nodes. For example, the root node Project depends on the Filter node, the Filter node depends on the GetNeighbor node, and so on, up to the leaf node Start. Then the execution plan is (not truly) executed.\nDuring this stage, every node has its input and output variables, which are stored in a hash table. The execution plan is not truly executed, so the value of each key in the associated hash table is empty (except for the Start node, where the input variables hold the starting data), and the hash table is defined in src/context/ExecutionContext.cpp under the nebula-graph repository.\nFor example, if the hash table is named as ResultMap when creating the Filter node, users can determine that the node takes data from ResultMap[\"GN1\"], then puts the result into ResultMap[\"Filter2\"], and so on. All these work as the input and output of each node.\nProcess of optimizationThe optimization rules that Planner has implemented so far are considered RBO (Rule-Based Optimization), namely the pre-defined optimization rules. The CBO (Cost-Based Optimization) feature is under development. The optimized code is in the src/optimizer/ directory under the nebula-graph repository.\nRBO is a “bottom-up” exploration process. 
For each rule, the root node of the execution plan (in this case, the Project node) is the entry point, and step by step along with the node dependencies, it reaches the node at the bottom to see if it matches the rule.\nAs shown in the preceding figure, when the Filter node is explored, it is found that its children node is GetNeighbors, which matches successfully with the pre-defined rules, so a transformation is initiated to integrate the Filter node into the GetNeighbors node, the Filter node is removed, and then the process continues to the next rule. Therefore, when the GetNeighbor operator calls interfaces of the Storage layer to get the neighboring edges of a vertex during the execution stage, the Storage layer will directly filter out the unqualified edges internally. Such optimization greatly reduces the amount of data transfer, which is commonly known as filter pushdown.\nExecutor\nThe Executor module consists of Scheduler and Executor. The Scheduler generates the corresponding execution operators against the execution plan, starting from the leaf nodes and ending at the root node. The structure is as follows.\nEach node of the execution plan has one execution operator node, whose input and output have been determined in the execution plan. Each operator only needs to get the values for the input variables, compute them, and finally put the results into the corresponding output variables. 
Therefore, it is only necessary to execute step by step from Start, and the result of the last operator is returned to the user as the final result.\nSource code hierarchy\nThe source code hierarchy under the nebula-graph repository is as follows.\n|--src\n |--graph\n |--context //contexts for validation and execution\n |--executor //execution operators\n |--gc //garbage collector\n |--optimizer //optimization rules\n |--planner //structure of the execution plans\n |--scheduler //scheduler\n |--service //external service management\n |--session //session management\n |--stats //monitoring metrics\n |--util //basic components\n |--validator //validation of the statements\n |--visitor //visitor expression","url":"https://docs.nebula-graph.io/3.6.0/1.introduction/3.nebula-graph-architecture/3.graph-service/","type":"doc"},{"title":"Storage Service","content":"The persistent data of NebulaGraph have two parts. One is the Meta Service that stores the meta-related data.\nThe other is the Storage Service that stores the data, which is run by the nebula-storaged process. This topic will describe the architecture of the Storage Service.\nAdvantages\nHigh performance (Customized built-in KVStore)\nGreat scalability (Shared-nothing architecture, not rely on NAS/SAN-like devices)\nStrong consistency (Raft)\nHigh availability (Raft)\nSupports synchronizing with the third party systems, such as Elasticsearch.\nThe architecture of the Storage Service\nThe Storage Service is run by the nebula-storaged process. Users can deploy nebula-storaged processes on different occasions. For example, users can deploy 1 nebula-storaged process in a test environment and deploy 3 nebula-storaged processes in a production environment.\nAll the nebula-storaged processes consist of a Raft-based cluster. There are three layers in the Storage Service:\nStorage interface\nThe top layer is the storage interface. It defines a set of APIs that are related to the graph concepts. 
These API requests will be translated into a set of KV operations targeting the corresponding Partition. For example:\ngetNeighbors: queries the in-edge or out-edge of a set of vertices, returns the edges and the corresponding properties, and supports conditional filtering.\ninsert vertex/edge: inserts a vertex or edge and its properties.\ngetProps: gets the properties of a vertex or an edge.\nIt is this layer that makes the Storage Service a real graph storage. Otherwise, it is just a KV storage.\nConsensus\nBelow the storage interface is the consensus layer that implements Multi Group Raft, which ensures the strong consistency and high availability of the Storage Service.\nStore engine\nThe bottom layer is the local storage engine library, providing operations like get, put, and scan on local disks. The related interfaces are stored in KVStore.h and KVEngine.h files. You can develop your own local store plugins based on your needs.\nThe following will describe some features of the Storage Service based on the above architecture.\nStorage writing process\nKVStore\nNebulaGraph develops and customizes its built-in KVStore for the following reasons.\nIt is a high-performance KVStore.\nIt is provided as a (kv) library and can be easily developed for the filter pushdown purpose. As a strong-typed database, how to provide Schema during pushdown is the key to efficiency for NebulaGraph.\nIt has strong data consistency.\nTherefore, NebulaGraph develops its own KVStore with RocksDB as the local storage engine. The advantages are as follows.\nFor multiple local hard disks, NebulaGraph can make full use of its concurrent capacities through deploying multiple data directories.\nThe Meta Service manages all the Storage servers. All the partition distribution data and current machine status can be found in the meta service. Accordingly, users can execute a manual load balancing plan in meta service.\nNebulaGraph provides its own WAL mode so one can customize the WAL. 
Each partition owns its WAL.\nOne NebulaGraph KVStore cluster supports multiple graph spaces, and each graph space has its own partition number and replica copies. Different graph spaces are isolated physically from each other in the same cluster.\nData storage structure\nGraphs consist of vertices and edges. NebulaGraph uses key-value pairs to store vertices, edges, and their properties. Vertices and edges are stored in keys and their properties are stored in values. Such structure enables efficient property filtering.\nThe storage structure of vertices\nDifferent from NebulaGraph version 2.x, version 3.x added a new key for each vertex. Compared to the old key that still exists, the new key has no TagID field and no value. Vertices in NebulaGraph can now live without tags owing to the new key.\nField\nDescription\nType\nOne byte, used to indicate the key type.\nPartID\nThree bytes, used to indicate the sharding partition and to scan the partition data based on the prefix when re-balancing the partition.\nVertexID\nThe vertex ID. For an integer VertexID, it occupies eight bytes. However, for a string VertexID, it is changed to fixed_string of a fixed length which needs to be specified by users when they create the space.\nTagID\nFour bytes, used to indicate the tags that vertex relate with.\nSerializedValue\nThe serialized value of the key. It stores the property information of the vertex.\nThe storage structure of edges\nField\nDescription\nType\nOne byte, used to indicate the key type.\nPartID\nThree bytes, used to indicate the partition ID. This field can be used to scan the partition data based on the prefix when re-balancing the partition.\nVertexID\nUsed to indicate vertex ID. The former VID refers to the source VID in the outgoing edge and the dest VID in the incoming edge, while the latter VID refers to the dest VID in the outgoing edge and the source VID in the incoming edge.\nEdge Type\nFour bytes, used to indicate the edge type. 
Greater than zero indicates out-edge, less than zero means in-edge.\nRank\nEight bytes, used to indicate multiple edges in one edge type. Users can set the field based on needs and store weight, such as transaction time and transaction number.\nPlaceHolder\nOne byte. Reserved.\nSerializedValue\nThe serialized value of the key. It stores the property information of the edge.\nProperty descriptions\nNebulaGraph uses strong-typed Schema.\nNebulaGraph will store the properties of vertex and edges in order after encoding them. Since the length of fixed-length properties is fixed, queries can be made in no time according to offset. Before decoding, NebulaGraph needs to get (and cache) the schema information in the Meta Service. In addition, when encoding properties, NebulaGraph will add the corresponding schema version to support online schema change.\nData partitioning\nSince in an ultra-large-scale relational network, vertices can be as many as tens to hundreds of billions, and edges are even more than trillions. Even if only vertices and edges are stored, the storage capacity of both exceeds that of ordinary servers. Therefore, NebulaGraph uses hash to shard the graph elements and store them in different partitions.\nEdge partitioning and storage amplification\nIn NebulaGraph, an edge corresponds to two key-value pairs on the hard disk. When there are lots of edges and each has many properties, storage amplification will be obvious. The storage format of edges is shown in the figure below.\nIn this example, SrcVertex connects DstVertex via EdgeA, forming the path of (SrcVertex)-[EdgeA]->(DstVertex). SrcVertex, DstVertex, and EdgeA will all be stored in Partition x and Partition y as four key-value pairs in the storage layer. Details are as follows:\nThe key value of SrcVertex is stored in Partition x. Key fields include Type, PartID(x), VID(Src), and TagID. 
SerializedValue, namely Value, refers to serialized vertex properties.\nThe first key value of EdgeA, namely EdgeA_Out, is stored in the same partition as the SrcVertex. Key fields include Type, PartID(x), VID(Src), EdgeType(+ means out-edge), Rank(0), VID(Dst), and PlaceHolder. SerializedValue, namely Value, refers to serialized edge properties.\nThe key value of DstVertex is stored in Partition y. Key fields include Type, PartID(y), VID(Dst), and TagID. SerializedValue, namely Value, refers to serialized vertex properties.\nThe second key value of EdgeA, namely EdgeA_In, is stored in the same partition as the DstVertex. Key fields include Type, PartID(y), VID(Dst), EdgeType(- means in-edge), Rank(0), VID(Src), and PlaceHolder. SerializedValue, namely Value, refers to serialized edge properties, which is exactly the same as that in EdgeA_Out.\nEdgeA_Out and EdgeA_In are stored in storage layer with opposite directions, constituting EdgeA logically. EdgeA_Out is used for traversal requests starting from SrcVertex, such as (a)-[]->(); EdgeA_In is used for traversal requests starting from DstVertex, such as ()-[]->(a).\nLike EdgeA_Out and EdgeA_In, NebulaGraph redundantly stores the information of each edge, which doubles the actual capacities needed for edge storage. The key corresponding to the edge occupies a small hard disk space, but the space occupied by Value is proportional to the length and amount of the property value. Therefore, it will occupy a relatively large hard disk space if the property value of the edge is large or there are many edge property values.\nPartition algorithm\nNebulaGraph uses a static Hash strategy to shard data through a modulo operation on vertex ID. All the out-keys, in-keys, and tag data will be placed in the same partition. In this way, query efficiency is increased dramatically.\nWhen inserting into NebulaGraph, vertices and edges are distributed across different partitions. And the partitions are located on different machines. 
The number of partitions is set in the CREATE SPACE statement and cannot be changed afterward.\nIf certain vertices need to be placed on the same partition (i.e., on the same machine), see Formula/code.\nThe following code will briefly describe the relationship between VID and partition.\n// If VertexID occupies 8 bytes, it will be stored in int64 to be compatible with the version 1.0.\nuint64_t vid = 0;\nif (id.size() == 8) {\n memcpy(static_cast<void*>(&vid), id.data(), 8);\n} else {\n MurmurHash2 hash;\n vid = hash(id.data());\n}\nPartitionID pId = vid % numParts + 1;\nRoughly speaking, after hashing a fixed string to int64, (the hashing of int64 is the number itself), do modulo, and then plus one, namely:\npId = vid % numParts + 1;\nParameters and descriptions of the preceding formula are as follows:\nParameter\nDescription\n%\nThe modulo operation.\nnumParts\nThe number of partitions for the graph space where the VID is located, namely the value of partition_num in the CREATE SPACE statement.\npId\nThe ID for the partition where the VID is located.\nSuppose there are 100 partitions, the vertices with VID 1, 101, and 1001 will be stored on the same partition. But, the mapping between the partition ID and the machine address is random. Therefore, we cannot assume that any two partitions are located on the same machine.\nRaft\nRaft implementation\nIn a distributed system, one data usually has multiple replicas so that the system can still run normally even if a few copies fail. It requires certain technical means to ensure consistency between replicas.\nBasic principle: Raft is designed to ensure consistency between replicas. Raft uses election between replicas, and the (candidate) replica that wins more than half of the votes will become the Leader, providing external services on behalf of all replicas. The rest Followers will play backups. 
When the Leader fails (due to communication failure, operation and maintenance commands, etc.), the rest Followers will conduct a new round of elections and vote for a new Leader. The Leader and Followers will detect each other's survival through heartbeats and write them to the hard disk in Raft-wal mode. Replicas that do not respond to more than multiple heartbeats will be considered faulty.\nRead and write: For every writing request of the clients, the Leader will initiate a Raft-wal and synchronize it with the Followers. Only after over half replicas have received the Raft-wal will it return to the clients successfully. For every reading request of the clients, it will get to the Leader directly, while Followers will not be involved.\nFailure: Scenario 1: Take a (space) cluster of a single replica as an example. If the system has only one replica, the Leader will be itself. If failure happens, the system will be completely unavailable. Scenario 2: Take a (space) cluster of three replicas as an example. If the system has three replicas, one of them will be the Leader and the rest will be the Followers. If the Leader fails, the rest two can still vote for a new Leader (and a Follower), and the system is still available. But if any of the two Followers fails again, the system will be completely unavailable due to inadequate voters.\nMulti Group Raft\nThe Storage Service supports a distributed cluster architecture, so NebulaGraph implements Multi Group Raft according to Raft protocol. Each Raft group stores all the replicas of each partition. One replica is the leader, while others are followers. In this way, NebulaGraph achieves strong consistency and high availability. The functions of Raft are as follows.\nNebulaGraph uses Multi Group Raft to improve performance when there are many partitions because Raft-wal cannot be NULL. 
When there are too many partitions, costs will increase, such as storing information in Raft group, WAL files, or batch operation in low load.\nThere are two key points to implement the Multi Raft Group:\nTo share transport layer\nEach Raft Group sends messages to its corresponding peers. So if the transport layer cannot be shared, the connection costs will be very high.\nTo share thread pool\nRaft Groups share the same thread pool to prevent starting too many threads and a high context switch cost.\nBatch\nFor each partition, it is necessary to do a batch to improve throughput when writing the WAL serially. As NebulaGraph uses WAL to implement some special functions, batches need to be grouped, which is a feature of NebulaGraph.\nFor example, lock-free CAS operations will execute after all the previous WALs are committed. So for a batch, if there are several WALs in CAS type, we need to divide this batch into several smaller groups and make sure they are committed serially.\nTransfer Leadership\nTransfer leadership is extremely important for balance. When moving a partition from one machine to another, NebulaGraph first checks if the source is a leader. If so, it should be moved to another peer. After data migration is completed, it is important to balance leader distribution again.\nWhen a transfer leadership command is committed, the leader will abandon its leadership and the followers will start a leader election.\nPeer changes\nTo avoid split-brain, when members in a Raft Group change, an intermediate state is required. In such a state, the quorum of the old group and new group always have an overlap. Thus it prevents the old or new group from making decisions unilaterally. To make it even simpler, in his doctoral thesis Diego Ongaro suggests adding or removing a peer once to ensure the overlap between the quorum of the new group and the old group. NebulaGraph also uses this approach, except that the way to add or remove a member is different. 
For details, please refer to addPeer/removePeer in the Raft Part class.\nDifferences with HDFS\nThe Storage Service is a Raft-based distributed architecture, which has certain differences with that of HDFS. For example:\nThe Storage Service ensures consistency through Raft. Usually, the number of its replicas is odd to elect a leader. However, DataNode used by HDFS ensures consistency through NameNode, which has no limit on the number of replicas.\nIn the Storage Service, only the replicas of the leader can read and write, while in HDFS all the replicas can do so.\nIn the Storage Service, the number of replicas needs to be determined when creating a space, since it cannot be changed afterward. But in HDFS, the number of replicas can be changed freely.\nThe Storage Service can access the file system directly. While the applications of HDFS (such as HBase) have to access HDFS before the file system, which requires more RPC times.\nIn a word, the Storage Service is more lightweight with some functions simplified and its architecture is simpler than HDFS, which can effectively improve the read and write performance of a smaller block of data.","url":"https://docs.nebula-graph.io/3.6.0/1.introduction/3.nebula-graph-architecture/4.storage-service/","type":"doc"},{"title":"Quickly deploy NebulaGraph using Docker","content":"You can quickly get started with NebulaGraph by deploying NebulaGraph with Docker Desktop or Docker Compose. \nUsing Docker DesktopUsing Docker Compose\nNebulaGraph is available as a Docker Extension that you can easily install and run on your Docker Desktop. 
You can quickly deploy NebulaGraph using Docker Desktop with just one click.\nInstall Docker Desktop\nInstall Docker Desktop on Mac\nInstall Docker Desktop on Windows\nIn the left sidebar of Docker Desktop, click Extensions or Add Extensions.\nOn the Extensions Marketplace, search for NebulaGraph and click Install.\nClick Update to update NebulaGraph to the latest version when a new version is available.\nClick Open to navigate to the NebulaGraph extension page.\nAt the top of the page, click Studio in Browser to use NebulaGraph.\nFor more information about how to use NebulaGraph with Docker Desktop, see the following video:\nUsing Docker Compose can quickly deploy NebulaGraph services based on the prepared configuration file. It is only recommended to use this method when testing the functions of NebulaGraph.\nPrerequisites\nYou have installed the following applications on your host.\nApplication\nRecommended version\nOfficial installation reference\nDocker\nLatest\nInstall Docker Engine\nDocker Compose\nLatest\nInstall Docker Compose\nGit\nLatest\nDownload Git\nIf you are deploying NebulaGraph as a non-root user, grant the user with Docker-related privileges. For detailed instructions, see Manage Docker as a non-root user.\nYou have started the Docker service on your host.\nIf you have already deployed another version of NebulaGraph with Docker Compose on your host, to avoid compatibility issues, you need to delete the nebula-docker-compose/data directory.\nDeploy NebulaGraph\nClone the 3.6.0 branch of the nebula-docker-compose repository to your host with Git.\n$ git clone -b release-3.6 https://github.com/vesoft-inc/nebula-docker-compose.git\nGo to the nebula-docker-compose directory.\n$ cd nebula-docker-compose/\nRun the following command to start all the NebulaGraph services.\n[nebula-docker-compose]$ docker-compose up -d\nCreating nebula-docker-compose_metad0_1 ... done\nCreating nebula-docker-compose_metad2_1 ... 
done\nCreating nebula-docker-compose_metad1_1 ... done\nCreating nebula-docker-compose_graphd2_1 ... done\nCreating nebula-docker-compose_graphd_1 ... done\nCreating nebula-docker-compose_graphd1_1 ... done\nCreating nebula-docker-compose_storaged0_1 ... done\nCreating nebula-docker-compose_storaged2_1 ... done\nCreating nebula-docker-compose_storaged1_1 ... done\nConnect to NebulaGraph\nThere are two ways to connect to NebulaGraph:\nConnected with Nebula Console outside the container. Because the external mapping port for the Graph service is also fixed as 9669 in the container's configuration file, you can connect directly through the default port. For details, see Connect to NebulaGraph.\nLog into the container installed NebulaGraph Console, then connect to the Graph service. This section describes this approach.\nRun the following command to view the name of NebulaGraph Console docker container.\n$ docker-compose ps\n Name Command State Ports\n--------------------------------------------------------------------------------------------\nnebula-docker-compose_console_1 sh -c sleep 3 && Up\n nebula-co ...\n......\nRun the following command to enter the NebulaGraph Console docker container.\ndocker exec -it nebula-docker-compose_console_1 /bin/sh\n/ #\nConnect to NebulaGraph with NebulaGraph Console.\n/ # ./usr/local/bin/nebula-console -u -p --address=graphd --port=9669\nRun the following commands to view the cluster state.\n SHOW HOSTS;\nRun exit twice to switch back to your terminal (shell).\nCheck the NebulaGraph service status and ports\nRun docker-compose ps to list all the services of NebulaGraph and their status and ports.\n$ docker-compose ps\nnebula-docker-compose_console_1 sh -c sleep 3 && Up\n nebula-co ...\nnebula-docker-compose_graphd1_1 /usr/local/nebula/bin/nebu ... 
Up 0.0.0.0:49174->19669/tcp,:::49174->19669/tcp, 0.0.0.0:49171->19670/tcp,:::49171->19670/tcp, 0.0.0.0:49177->9669/tcp,:::49177->9669/tcp\nnebula-docker-compose_graphd2_1 /usr/local/nebula/bin/nebu ... Up 0.0.0.0:49175->19669/tcp,:::49175->19669/tcp, 0.0.0.0:49172->19670/tcp,:::49172->19670/tcp, 0.0.0.0:49178->9669/tcp,:::49178->9669/tcp\nnebula-docker-compose_graphd_1 /usr/local/nebula/bin/nebu ... Up 0.0.0.0:49180->19669/tcp,:::49180->19669/tcp, 0.0.0.0:49179->19670/tcp,:::49179->19670/tcp, 0.0.0.0:9669->9669/tcp,:::9669->9669/tcp\nnebula-docker-compose_metad0_1 /usr/local/nebula/bin/nebu ... Up 0.0.0.0:49157->19559/tcp,:::49157->19559/tcp, 0.0.0.0:49154->19560/tcp,:::49154->19560/tcp, 0.0.0.0:49160->9559/tcp,:::49160->9559/tcp, 9560/tcp\nnebula-docker-compose_metad1_1 /usr/local/nebula/bin/nebu ... Up 0.0.0.0:49156->19559/tcp,:::49156->19559/tcp, 0.0.0.0:49153->19560/tcp,:::49153->19560/tcp, 0.0.0.0:49159->9559/tcp,:::49159->9559/tcp, 9560/tcp\nnebula-docker-compose_metad2_1 /usr/local/nebula/bin/nebu ... Up 0.0.0.0:49158->19559/tcp,:::49158->19559/tcp, 0.0.0.0:49155->19560/tcp,:::49155->19560/tcp, 0.0.0.0:49161->9559/tcp,:::49161->9559/tcp, 9560/tcp\nnebula-docker-compose_storaged0_1 /usr/local/nebula/bin/nebu ... Up 0.0.0.0:49166->19779/tcp,:::49166->19779/tcp, 0.0.0.0:49163->19780/tcp,:::49163->19780/tcp, 9777/tcp, 9778/tcp, 0.0.0.0:49169->9779/tcp,:::49169->9779/tcp, 9780/tcp\nnebula-docker-compose_storaged1_1 /usr/local/nebula/bin/nebu ... Up 0.0.0.0:49165->19779/tcp,:::49165->19779/tcp, 0.0.0.0:49162->19780/tcp,:::49162->19780/tcp, 9777/tcp, 9778/tcp, 0.0.0.0:49168->9779/tcp,:::49168->9779/tcp, 9780/tcp\nnebula-docker-compose_storaged2_1 /usr/local/nebula/bin/nebu ... 
Up 0.0.0.0:49167->19779/tcp,:::49167->19779/tcp, 0.0.0.0:49164->19780/tcp,:::49164->19780/tcp, 9777/tcp, 9778/tcp, 0.0.0.0:49170->9779/tcp,:::49170->9779/tcp, 9780/tcp\nIf the service is abnormal, you can first confirm the abnormal container name (such as nebula-docker-compose_graphd2_1).\nThen you can execute docker ps to view the corresponding CONTAINER ID (such as 2a6c56c405f5).\n[nebula-docker-compose]$ docker ps\nCONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES\n2a6c56c405f5 vesoft/nebula-graphd:nightly \"/usr/local/nebula/b…\" 36 minutes ago Up 36 minutes (healthy) 0.0.0.0:49230->9669/tcp, 0.0.0.0:49229->19669/tcp, 0.0.0.0:49228->19670/tcp nebula-docker-compose_graphd2_1\n7042e0a8e83d vesoft/nebula-storaged:nightly \"./bin/nebula-storag…\" 36 minutes ago Up 36 minutes (healthy) 9777-9778/tcp, 9780/tcp, 0.0.0.0:49227->9779/tcp, 0.0.0.0:49226->19779/tcp, 0.0.0.0:49225->19780/tcp nebula-docker-compose_storaged2_1\n18e3ea63ad65 vesoft/nebula-storaged:nightly \"./bin/nebula-storag…\" 36 minutes ago Up 36 minutes (healthy) 9777-9778/tcp, 9780/tcp, 0.0.0.0:49219->9779/tcp, 0.0.0.0:49218->19779/tcp, 0.0.0.0:49217->19780/tcp nebula-docker-compose_storaged0_1\n4dcabfe8677a vesoft/nebula-graphd:nightly \"/usr/local/nebula/b…\" 36 minutes ago Up 36 minutes (healthy) 0.0.0.0:49224->9669/tcp, 0.0.0.0:49223->19669/tcp, 0.0.0.0:49222->19670/tcp nebula-docker-compose_graphd1_1\na74054c6ae25 vesoft/nebula-graphd:nightly \"/usr/local/nebula/b…\" 36 minutes ago Up 36 minutes (healthy) 0.0.0.0:9669->9669/tcp, 0.0.0.0:49221->19669/tcp, 0.0.0.0:49220->19670/tcp nebula-docker-compose_graphd_1\n880025a3858c vesoft/nebula-storaged:nightly \"./bin/nebula-storag…\" 36 minutes ago Up 36 minutes (healthy) 9777-9778/tcp, 9780/tcp, 0.0.0.0:49216->9779/tcp, 0.0.0.0:49215->19779/tcp, 0.0.0.0:49214->19780/tcp nebula-docker-compose_storaged1_1\n45736a32a23a vesoft/nebula-metad:nightly \"./bin/nebula-metad …\" 36 minutes ago Up 36 minutes (healthy) 9560/tcp, 0.0.0.0:49213->9559/tcp, 
0.0.0.0:49212->19559/tcp, 0.0.0.0:49211->19560/tcp nebula-docker-compose_metad0_1\n3b2c90eb073e vesoft/nebula-metad:nightly \"./bin/nebula-metad …\" 36 minutes ago Up 36 minutes (healthy) 9560/tcp, 0.0.0.0:49207->9559/tcp, 0.0.0.0:49206->19559/tcp, 0.0.0.0:49205->19560/tcp nebula-docker-compose_metad2_1\n7bb31b7a5b3f vesoft/nebula-metad:nightly \"./bin/nebula-metad …\" 36 minutes ago Up 36 minutes (healthy) 9560/tcp, 0.0.0.0:49210->9559/tcp, 0.0.0.0:49209->19559/tcp, 0.0.0.0:49208->19560/tcp nebula-docker-compose_metad1_1\nUse the CONTAINER ID to log in the container and troubleshoot.\nnebula-docker-compose]$ docker exec -it 2a6c56c405f5 bash\n[root@2a6c56c405f5 nebula]#\nCheck the service data and logs\nAll the data and logs of NebulaGraph are stored persistently in the nebula-docker-compose/data and nebula-docker-compose/logs directories.\nThe structure of the directories is as follows:\nnebula-docker-compose/\n |-- docker-compose.yaml\n ├── data\n │ ├── meta0\n │ ├── meta1\n │ ├── meta2\n │ ├── storage0\n │ ├── storage1\n │ └── storage2\n └── logs\n ├── graph\n ├── graph1\n ├── graph2\n ├── meta0\n ├── meta1\n ├── meta2\n ├── storage0\n ├── storage1\n └── storage2\nStop the NebulaGraph services\nYou can run the following command to stop the NebulaGraph services:\n$ docker-compose down\nThe following information indicates you have successfully stopped the NebulaGraph services:\nStopping nebula-docker-compose_console_1 ... done\nStopping nebula-docker-compose_graphd1_1 ... done\nStopping nebula-docker-compose_graphd_1 ... done\nStopping nebula-docker-compose_graphd2_1 ... done\nStopping nebula-docker-compose_storaged1_1 ... done\nStopping nebula-docker-compose_storaged0_1 ... done\nStopping nebula-docker-compose_storaged2_1 ... done\nStopping nebula-docker-compose_metad2_1 ... done\nStopping nebula-docker-compose_metad0_1 ... done\nStopping nebula-docker-compose_metad1_1 ... done\nRemoving nebula-docker-compose_console_1 ... 
done\nRemoving nebula-docker-compose_graphd1_1 ... done\nRemoving nebula-docker-compose_graphd_1 ... done\nRemoving nebula-docker-compose_graphd2_1 ... done\nRemoving nebula-docker-compose_storaged1_1 ... done\nRemoving nebula-docker-compose_storaged0_1 ... done\nRemoving nebula-docker-compose_storaged2_1 ... done\nRemoving nebula-docker-compose_metad2_1 ... done\nRemoving nebula-docker-compose_metad0_1 ... done\nRemoving nebula-docker-compose_metad1_1 ... done\nRemoving network nebula-docker-compose_nebula-net\nModify configurations\nThe configuration file of NebulaGraph deployed by Docker Compose is nebula-docker-compose/docker-compose.yaml. To make the new configuration take effect, modify the configuration in this file and restart the service.\nFor more instructions, see Configurations.\nFAQ\nHow to fix the docker mapping to external ports?\nTo set the ports of corresponding services as fixed mapping, modify the docker-compose.yaml in the nebula-docker-compose directory. For example:\ngraphd:\n image: vesoft/nebula-graphd:release-3.6\n ...\n ports:\n - 9669:9669\n - 19669\n - 19670\n9669:9669 indicates the internal port 9669 is uniformly mapped to external ports, while 19669 indicates the internal port 19669 is randomly mapped to external ports.\nHow to upgrade or update the docker images of NebulaGraph services\nIn the nebula-docker-compose/docker-compose.yaml file, change all the image values to the required image version.\nIn the nebula-docker-compose directory, run docker-compose pull to update the images of the Graph Service, Storage Service, Meta Service, and NebulaGraph Console.\nRun docker-compose up -d to start the NebulaGraph services again.\nAfter connecting to NebulaGraph with NebulaGraph Console, run SHOW HOSTS GRAPH, SHOW HOSTS STORAGE, or SHOW HOSTS META to check the version of the responding service respectively.\nERROR: toomanyrequests when docker-compose pull\nYou may meet the following error.\nERROR: toomanyrequests: You have reached your 
pull rate limit. You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit.\nYou have met the rate limit of Docker Hub. Learn more on Understanding Docker Hub Rate Limiting.\nHow to update the NebulaGraph Console client\nThe command docker-compose pull updates both the NebulaGraph services and the NebulaGraph Console.","url":"https://docs.nebula-graph.io/3.6.0/2.quick-start/1.quick-start-workflow/","type":"doc"},{"title":"Step 1: Install NebulaGraph","content":"RPM and DEB are common package formats on Linux systems. This topic shows how to quickly install NebulaGraph with the RPM or DEB package.\nPrerequisites\nThe tool wget is installed.\nStep 1: Download the package from cloud service\nDownload the released version.URL:\n//Centos 7\nhttps://oss-cdn.nebula-graph.io/package//nebula-graph-.el7.x86_64.rpm\n//Centos 8\nhttps://oss-cdn.nebula-graph.io/package//nebula-graph-.el8.x86_64.rpm\n//Ubuntu 1604\nhttps://oss-cdn.nebula-graph.io/package//nebula-graph-.ubuntu1604.amd64.deb\n//Ubuntu 1804\nhttps://oss-cdn.nebula-graph.io/package//nebula-graph-.ubuntu1804.amd64.deb\n//Ubuntu 2004\nhttps://oss-cdn.nebula-graph.io/package//nebula-graph-.ubuntu2004.amd64.deb\nFor example, download the release package 3.6.0 for Centos 7.5:\nwget https://oss-cdn.nebula-graph.io/package/3.6.0/nebula-graph-3.6.0.el7.x86_64.rpm\nwget https://oss-cdn.nebula-graph.io/package/3.6.0/nebula-graph-3.6.0.el7.x86_64.rpm.sha256sum.txt\nDownload the release package 3.6.0 for Ubuntu 1804:\nwget https://oss-cdn.nebula-graph.io/package/3.6.0/nebula-graph-3.6.0.ubuntu1804.amd64.deb\nwget https://oss-cdn.nebula-graph.io/package/3.6.0/nebula-graph-3.6.0.ubuntu1804.amd64.deb.sha256sum.txt\nDownload the nightly version.\nURL:\n//Centos 7\nhttps://oss-cdn.nebula-graph.io/package/nightly//nebula-graph--nightly.el7.x86_64.rpm\n//Centos 8\nhttps://oss-cdn.nebula-graph.io/package/nightly//nebula-graph--nightly.el8.x86_64.rpm\n//Ubuntu 
1604\nhttps://oss-cdn.nebula-graph.io/package/nightly//nebula-graph--nightly.ubuntu1604.amd64.deb\n//Ubuntu 1804\nhttps://oss-cdn.nebula-graph.io/package/nightly//nebula-graph--nightly.ubuntu1804.amd64.deb\n//Ubuntu 2004\nhttps://oss-cdn.nebula-graph.io/package/nightly//nebula-graph--nightly.ubuntu2004.amd64.deb\nFor example, download the Centos 7.5 package developed and built in 2021.11.28:\nwget https://oss-cdn.nebula-graph.io/package/nightly/2021.11.28/nebula-graph-2021.11.28-nightly.el7.x86_64.rpm\nwget https://oss-cdn.nebula-graph.io/package/nightly/2021.11.28/nebula-graph-2021.11.28-nightly.el7.x86_64.rpm.sha256sum.txt\nFor example, download the Ubuntu 1804 package developed and built in 2021.11.28:\nwget https://oss-cdn.nebula-graph.io/package/nightly/2021.11.28/nebula-graph-2021.11.28-nightly.ubuntu1804.amd64.deb\nwget https://oss-cdn.nebula-graph.io/package/nightly/2021.11.28/nebula-graph-2021.11.28-nightly.ubuntu1804.amd64.deb.sha256sum.txt\nStep 2: Install NebulaGraph\nUse the following syntax to install with an RPM package.\n$ sudo rpm -ivh --prefix= \nThe option --prefix indicates the installation path. The default path is /usr/local/nebula/.\nFor example, to install an RPM package in the default path for the 3.6.0 version, run the following command.\nsudo rpm -ivh nebula-graph-3.6.0.el7.x86_64.rpm\nUse the following syntax to install with a DEB package.\n$ sudo dpkg -i \nFor example, to install a DEB package for the 3.6.0 version, run the following command.\nsudo dpkg -i nebula-graph-3.6.0.ubuntu1804.amd64.deb\nNext to do\nStart NebulaGraph \nConnect to NebulaGraph","url":"https://docs.nebula-graph.io/3.6.0/2.quick-start/2.install-nebula-graph/","type":"doc"},{"title":"Step 2: Manage NebulaGraph Service","content":"NebulaGraph supports managing services with scripts. 
\nManage services with script\nYou can use the nebula.service script to start, stop, restart, terminate, and check the NebulaGraph services.\nSyntax\n$ sudo /usr/local/nebula/scripts/nebula.service\n[-v] [-c ]\n\n\nParameter\nDescription\n-v\nDisplay detailed debugging information.\n-c\nSpecify the configuration file path. The default path is /usr/local/nebula/etc/.\nstart\nStart the target services.\nstop\nStop the target services.\nrestart\nRestart the target services.\nkill\nTerminate the target services.\nstatus\nCheck the status of the target services.\nmetad\nSet the Meta Service as the target service.\ngraphd\nSet the Graph Service as the target service.\nstoraged\nSet the Storage Service as the target service.\nall\nSet all the NebulaGraph services as the target services.\nStart NebulaGraph\nRun the following command to start NebulaGraph.\n$ sudo /usr/local/nebula/scripts/nebula.service start all\n[INFO] Starting nebula-metad...\n[INFO] Done\n[INFO] Starting nebula-graphd...\n[INFO] Done\n[INFO] Starting nebula-storaged...\n[INFO] Done\nStop NebulaGraph\nRun the following command to stop NebulaGraph.\n$ sudo /usr/local/nebula/scripts/nebula.service stop all\n[INFO] Stopping nebula-metad...\n[INFO] Done\n[INFO] Stopping nebula-graphd...\n[INFO] Done\n[INFO] Stopping nebula-storaged...\n[INFO] Done\nCheck the service status\nRun the following command to check the service status of NebulaGraph.\n$ sudo /usr/local/nebula/scripts/nebula.service status all\nNebulaGraph is running normally if the following information is returned.\n[INFO] nebula-metad(33fd35e): Running as 29020, Listening on 9559\n[INFO] nebula-graphd(33fd35e): Running as 29095, Listening on 9669\n[WARN] nebula-storaged after v3.0.0 will not start service until it is added to cluster.\n[WARN] See Manage Storage hosts:ADD HOSTS in https://docs.nebula-graph.io/\n[INFO] nebula-storaged(33fd35e): Running as 29147, Listening on 9779\nIf the returned result is similar to the following one, there is a 
problem. You may also go to the NebulaGraph community for help.[INFO] nebula-metad: Running as 25600, Listening on 9559\n[INFO] nebula-graphd: Exited\n[INFO] nebula-storaged: Running as 25646, Listening on 9779\nThe NebulaGraph services consist of the Meta Service, Graph Service, and Storage Service. The configuration files for all three services are stored in the /usr/local/nebula/etc/ directory by default. You can check the configuration files according to the returned result to troubleshoot problems.\nNext to do\nConnect to NebulaGraph","url":"https://docs.nebula-graph.io/3.6.0/2.quick-start/5.start-stop-service/","type":"doc"},{"title":"Step 3: Connect to NebulaGraph","content":"This topic provides basic instruction on how to use the native CLI client NebulaGraph Console to connect to NebulaGraph.\nNebulaGraph supports multiple types of clients, including a CLI client, a GUI client, and clients developed in popular programming languages. For more information, see the client list.\nPrerequisites\nYou have started NebulaGraph services.\nThe machine on which you plan to run NebulaGraph Console has network access to the Graph Service of NebulaGraph.\nThe NebulaGraph Console version is compatible with the NebulaGraph version.\nSteps\nOn the NebulaGraph Console releases page, select a NebulaGraph Console version and click Assets.\nIn the Assets area, find the correct binary file for the machine where you want to run NebulaGraph Console and download the file to the machine.\n(Optional) Rename the binary file to nebula-console for convenience.\nOn the machine to run NebulaGraph Console, grant the execute permission of the nebula-console binary file to the user.\n$ chmod 111 nebula-console\nIn the command line interface, change the working directory to the one where the nebula-console binary file is stored.\nRun the following command to connect to NebulaGraph.\nFor Linux or macOS:\n$ ./nebula-console -addr -port -u -p \n[-t 120] [-e \"nGQL_statement\" | -f 
filename.nGQL]\nFor Windows:\n> nebula-console.exe -addr -port -u -p \n[-t 120] [-e \"nGQL_statement\" | -f filename.nGQL]\nParameter descriptions are as follows:\nParameter\nDescription\n-h/-help\nShows the help menu.\n-addr/-address\nSets the IP (or hostname) of the Graph service. The default address is 127.0.0.1. \n-P/-port\nSets the port number of the graphd service. The default port number is 9669.\n-u/-user\nSets the username of your NebulaGraph account. Before enabling authentication, you can use any existing username. The default username is root.\n-p/-password\nSets the password of your NebulaGraph account. Before enabling authentication, you can use any characters as the password.\n-t/-timeout\nSets an integer-type timeout threshold of the connection. The unit is millisecond. The default value is 120.\n-e/-eval\nSets a string-type nGQL statement. The nGQL statement is executed once the connection succeeds. The connection stops after the result is returned.\n-f/-file\nSets the path of an nGQL file. The nGQL statements in the file are executed once the connection succeeds. 
The result will be returned and the connection stops then.\n-enable_ssl\nEnables SSL encryption when connecting to NebulaGraph.\n-ssl_root_ca_path\nSets the storage path of the certification authority file.\n-ssl_cert_path\nSets the storage path of the certificate file.\n-ssl_private_key_path\nSets the storage path of the private key file.\nFor information on more parameters, see the project repository.","url":"https://docs.nebula-graph.io/3.6.0/2.quick-start/3.connect-to-nebula-graph/","type":"doc"},{"title":"Register the Storage Service","content":"When connecting to NebulaGraph for the first time, you have to add the Storage hosts, and confirm that all the hosts are online.\nPrerequisites\nYou have connected to NebulaGraph.\nSteps\nAdd the Storage hosts.\nRun the following command to add hosts:\nADD HOSTS : [,: ...];\nExample:\n ADD HOSTS 192.168.10.100:9779, 192.168.10.101:9779, 192.168.10.102:9779;\nCheck the status of the hosts to make sure that they are all online.\n SHOW HOSTS;\nThe Status column of the result above shows that all Storage hosts are online.","url":"https://docs.nebula-graph.io/3.6.0/2.quick-start/3.1add-storage-hosts/","type":"doc"},{"title":"Step 4: Use nGQL (CRUD)","content":"This topic will describe the basic CRUD operations in NebulaGraph.\nFor more information, see nGQL guide.\nGraph space and NebulaGraph schema\nA NebulaGraph instance consists of one or more graph spaces. Graph spaces are physically isolated from each other. You can use different graph spaces in the same instance to store different datasets.\nTo insert data into a graph space, define a schema for the graph database. NebulaGraph schema is based on the following components.\nSchema component\nDescription\nVertex\nRepresents an entity in the real world. A vertex can have zero to multiple tags.\nTag\nThe type of the same group of vertices. 
It defines a set of properties that describes the types of vertices.\nEdge\nRepresents a directed relationship between two vertices.\nEdge type\nThe type of an edge. It defines a group of properties that describes the types of edges.\nFor more information, see Data modeling.\nIn this topic, we will use the following dataset to demonstrate basic CRUD operations.\nAsync implementation of CREATE and ALTER\nCREATE SPACE\nCREATE TAG\nCREATE EDGE\nALTER TAG\nALTER EDGE\nCREATE TAG INDEX\nCREATE EDGE INDEX\nCreate and use a graph space\nnGQL syntax\nCreate a graph space:CREATE SPACE [IF NOT EXISTS] (\n[partition_num = ,]\n[replica_factor = ,]\nvid_type = {FIXED_STRING() | INT64}\n[COMMENT = ''];\nFor more information on parameters, see CREATE SPACE.\nList graph spaces and check if the creation is successful: SHOW SPACES;\nUse a graph space:USE ;\nExamples\nUse the following statement to create a graph space named basketballplayer.\n CREATE SPACE basketballplayer(partition_num=15, replica_factor=1, vid_type=fixed_string(30));\nCheck the partition distribution with SHOW HOSTS to make sure that the partitions are distributed in a balanced way.\n SHOW HOSTS;\nIf the Leader distribution is uneven, use BALANCE LEADER to redistribute the partitions. For more information, see BALANCE.\nUse the basketballplayer graph space.\nnebula[(none)]> USE basketballplayer;\nYou can use SHOW SPACES to check the graph space you created.\n SHOW SPACES;\nCreate tags and edge types\nnGQL syntax\nCREATE {TAG | EDGE} [IF NOT EXISTS] { | }\n [NULL | NOT NULL] [DEFAULT ] [COMMENT '']\n [{, [NULL | NOT NULL] [DEFAULT ] [COMMENT '']} ...] \n [TTL_DURATION = ]\n [TTL_COL = ]\n [COMMENT = ''];\nFor more information on parameters, see CREATE TAG and CREATE EDGE.\nExamples\nCreate tags player and team, and edge types follow and serve. 
Descriptions are as follows.\nComponent name\nType\nProperty\nplayer\nTag\nname (string), age (int)\nteam\nTag\nname (string)\nfollow\nEdge type\ndegree (int)\nserve\nEdge type\nstart_year (int), end_year (int)\n CREATE TAG player(name string, age int);\n CREATE TAG team(name string);\n CREATE EDGE follow(degree int);\n CREATE EDGE serve(start_year int, end_year int);\nInsert vertices and edges\nYou can use the INSERT statement to insert vertices or edges based on existing tags or edge types.\nnGQL syntax\nInsert vertices:INSERT VERTEX [IF NOT EXISTS] [tag_props, [tag_props] ...]\nVALUES : ([prop_value_list])\ntag_props:\n tag_name ([prop_name_list])\nprop_name_list:\n [prop_name [, prop_name] ...]\nprop_value_list:\n [prop_value [, prop_value] ...] \nvid is short for Vertex ID. A vid must be a unique string value in a graph space. For details, see INSERT VERTEX.\nInsert edges:\nINSERT EDGE [IF NOT EXISTS] ( ) VALUES \n -> [@] : ( )\n[, -> [@] : ( ), ...];\n ::=\n[ [, ] ...]\n ::=\n[ [, ] ...]\nFor more information on parameters, see INSERT EDGE.\nExamples\nInsert vertices representing basketball players and teams: INSERT VERTEX player(name, age) VALUES \"player100\":(\"Tim Duncan\", 42);\n INSERT VERTEX player(name, age) VALUES \"player101\":(\"Tony Parker\", 36);\n INSERT VERTEX player(name, age) VALUES \"player102\":(\"LaMarcus Aldridge\", 33);\n INSERT VERTEX team(name) VALUES \"team203\":(\"Trail Blazers\"), \"team204\":(\"Spurs\");\nInsert edges representing the relations between basketball players and teams: INSERT EDGE follow(degree) VALUES \"player101\" -> \"player100\":(95);\n INSERT EDGE follow(degree) VALUES \"player101\" -> \"player102\":(90);\n INSERT EDGE follow(degree) VALUES \"player102\" -> \"player100\":(75);\n INSERT EDGE serve(start_year, end_year) VALUES \"player101\" -> \"team204\":(1999, 2018),\"player102\" -> \"team203\":(2006, 2015);\nRead data\nThe GO statement can traverse the database based on specific conditions. 
A GO traversal starts from one or more vertices, along one or more edges, and returns information in a form specified in the YIELD clause.\nThe FETCH statement is used to get properties from vertices or edges.\nThe LOOKUP statement is based on indexes. It is used together with the WHERE clause to search for the data that meet the specific conditions.\nThe MATCH statement is the most commonly used statement for graph data querying. It can describe all kinds of graph patterns, but it relies on indexes to match data patterns in NebulaGraph. Therefore, its performance still needs optimization.\nnGQL syntax\nGOGO [[ TO] {STEP|STEPS} ] FROM \nOVER [{REVERSELY | BIDIRECT}]\n[ WHERE ]\nYIELD [DISTINCT] \n[{ SAMPLE | }]\n[| GROUP BY { | expression> | } YIELD ]\n[| ORDER BY [{ASC | DESC}]]\n[| LIMIT [,] ];\nFETCH\nFetch properties on tags:\nFETCH PROP ON {[, tag_name ...] | *}\n [, vid ...]\nYIELD [AS ];\nFetch properties on edges:\nFETCH PROP ON -> [@] [, -> ...]\nYIELD ;\nLOOKUPLOOKUP ON { | }\n[WHERE [AND ...]]\nYIELD [AS ];\n\n [AS ] [, [AS ] ...];\nMATCHMATCH [] RETURN [];\nExamples of GO statement\nSearch for the players that the player with VID player101 follows. GO FROM \"player101\" OVER follow YIELD id($$);\nFilter the players that the player with VID player101 follows whose age is equal to or greater than 35. Rename the corresponding columns in the results with Teammate and Age. GO FROM \"player101\" OVER follow WHERE properties($$).age >= 35 \\\n YIELD properties($$).name AS Teammate, properties($$).age AS Age;\n |-------------+---------------------------------------------------------------------|\n | YIELD | Specifies what values or results you want to return from the query. |\n | $$ | Represents the target vertices. |\n | \\ | A line-breaker. |\nSearch for the players that the player with VID player101 follows. Then retrieve the teams of the players that the player with VID player100 follows. 
To combine the two queries, use a pipe or a temporary variable.\nWith a pipe:\n GO FROM \"player101\" OVER follow YIELD dst(edge) AS id | \\\n GO FROM $-.id OVER serve YIELD properties($$).name AS Team, \\\n properties($^).name AS Player;\nClause/Sign\nDescription\n$^\nRepresents the source vertex of the edge.\nA pipe symbol can combine multiple queries.\n$-\nRepresents the outputs of the query before the pipe symbol.\nWith a temporary variable:\n $var = GO FROM \"player101\" OVER follow YIELD dst(edge) AS id; \\\n GO FROM $var.id OVER serve YIELD properties($$).name AS Team, \\\n properties($^).name AS Player;\nExample of FETCH statement\nUse FETCH: Fetch the properties of the player with VID player100.\n FETCH PROP ON player \"player100\" YIELD properties(vertex);\nUpdate vertices and edges\nUsers can use the UPDATE or the UPSERT statements to update existing data.\nUPSERT is the combination of UPDATE and INSERT. If you update a vertex or an edge with UPSERT, the database will insert a new vertex or edge if it does not exist.\nnGQL syntax\nUPDATE vertices:UPDATE VERTEX SET \n[WHEN ] [YIELD ];\nUPDATE edges:UPDATE EDGE ON -> [@rank] \nSET [WHEN ] [YIELD ];\nUPSERT vertices or edges:UPSERT {VERTEX | EDGE } SET \n[WHEN ] [YIELD ];\nExamples\nUPDATE the name property of the vertex with VID player100 and check the result with the FETCH statement. UPDATE VERTEX \"player100\" SET player.name = \"Tim\";\n FETCH PROP ON player \"player100\" YIELD properties(vertex);\nUPDATE the degree property of an edge and check the result with the FETCH statement. UPDATE EDGE ON follow \"player101\" -> \"player100\" SET degree = 96;\n FETCH PROP ON follow \"player101\" -> \"player100\" YIELD properties(edge);\nInsert a vertex with VID player111 and UPSERT it. 
INSERT VERTEX player(name,age) VALUES \"player111\":(\"David West\", 38);\n UPSERT VERTEX \"player111\" SET player.name = \"David\", player.age = $^.player.age + 11 \\\n WHEN $^.player.name == \"David West\" AND $^.player.age > 20 \\\n YIELD $^.player.name AS Name, $^.player.age AS Age;\nDelete vertices and edges\nnGQL syntax\nDelete vertices:DELETE VERTEX [, ...]\nDelete edges:DELETE EDGE -> [@]\n[, -> ...]\nExamples\nDelete vertices: DELETE VERTEX \"player111\", \"team203\";\nDelete edges: DELETE EDGE follow \"player101\" -> \"team204\";\nAbout indexes\nUsers can add indexes to tags and edge types with the CREATE INDEX statement.\nnGQL syntax\nCreate an index:CREATE {TAG | EDGE} INDEX [IF NOT EXISTS] \nON { | } ([]) [COMMENT = ''];\nRebuild an index:REBUILD {TAG | EDGE} INDEX ;\nExamples of LOOKUP and MATCH (index-based)\nMake sure there is an index for LOOKUP or MATCH to use. If there is not, create an index first.\nFind the information of the vertex with the tag player and its value of the name property is Tony Parker.\nThis example creates the index player_index_1 on the name property.\n CREATE TAG INDEX IF NOT EXISTS player_index_1 ON player(name(20));\nThis example rebuilds the index to make sure it takes effect on pre-existing data.\n REBUILD TAG INDEX player_index_1\nThis example uses the LOOKUP statement to retrieve the vertex property.\n LOOKUP ON player WHERE player.name == \"Tony Parker\" \\\n YIELD properties(vertex).name AS name, properties(vertex).age AS age;\nThis example uses the MATCH statement to retrieve the vertex property.\n MATCH (v:player{name:\"Tony Parker\"}) RETURN v;","url":"https://docs.nebula-graph.io/3.6.0/2.quick-start/4.nebula-graph-crud/","type":"doc"},{"title":"nGQL cheatsheet","content":"Functions\nMath functions\nFunction\nDescription\ndouble abs(double x)\nReturns the absolute value of the argument.\ndouble floor(double x)\nReturns the largest integer value smaller than or equal to the argument. 
(Rounds down)\ndouble ceil(double x)\nReturns the smallest integer greater than or equal to the argument. (Rounds up)\ndouble round(double x)\nReturns the integer value nearest to the argument. Returns a number farther away from 0 if the argument is in the middle.\ndouble sqrt(double x)\nReturns the square root of the argument.\ndouble cbrt(double x)\nReturns the cubic root of the argument.\ndouble hypot(double x, double y)\nReturns the hypotenuse of a right-angled triangle.\ndouble pow(double x, double y)\nReturns the result of xy.\ndouble exp(double x)\nReturns the result of ex.\ndouble exp2(double x)\nReturns the result of 2x.\ndouble log(double x)\nReturns the base-e logarithm of the argument.\ndouble log2(double x)\nReturns the base-2 logarithm of the argument.\ndouble log10(double x)\nReturns the base-10 logarithm of the argument.\ndouble sin(double x)\nReturns the sine of the argument.\ndouble asin(double x)\nReturns the inverse sine of the argument.\ndouble cos(double x)\nReturns the cosine of the argument.\ndouble acos(double x)\nReturns the inverse cosine of the argument.\ndouble tan(double x)\nReturns the tangent of the argument.\ndouble atan(double x)\nReturns the inverse tangent of the argument.\ndouble rand()\nReturns a random floating point number in the range from 0 (inclusive) to 1 (exclusive); i.e.[0,1).\nint rand32(int min, int max)\nReturns a random 32-bit integer in [min, max).If you set only one argument, it is parsed as max and min is 0 by default.If you set no argument, the system returns a random signed 32-bit integer.\nint rand64(int min, int max)\nReturns a random 64-bit integer in [min, max).If you set only one argument, it is parsed as max and min is 0 by default.If you set no argument, the system returns a random signed 64-bit integer.\nbit_and()\nBitwise AND.\nbit_or()\nBitwise OR.\nbit_xor()\nBitwise XOR.\nint size()\nReturns the number of elements in a list or a map or the length of a string.\nint range(int start, int end, int 
step)\nReturns a list of integers from [start,end] in the specified steps. step is 1 by default.\nint sign(double x)\nReturns the signum of the given number.If the number is 0, the system returns 0.If the number is negative, the system returns -1.If the number is positive, the system returns 1.\ndouble e()\nReturns the base of the natural logarithm, e (2.718281828459045).\ndouble pi()\nReturns the mathematical constant pi (3.141592653589793).\ndouble radians()\nConverts degrees to radians. radians(180) returns 3.141592653589793.\nAggregating functions\nFunction\nDescription\navg()\nReturns the average value of the argument.\ncount()\nSyntax: count({expr | *}) .count()returns the number of rows (including NULL). count(expr)returns the number of non-NULL values that meet the expression. count() and size() are different.\nmax()\nReturns the maximum value.\nmin()\nReturns the minimum value.\ncollect()\nThe collect() function returns a list containing the values returned by an expression. Using this function aggregates data by merging multiple records or values into a single list.\nstd()\nReturns the population standard deviation.\nsum()\nReturns the sum value.\nString functions\nFunction\nDescription\nint strcasecmp(string a, string b)\nCompares string a and b without case sensitivity. 
When a = b, the return\nstring lower(string a)\nReturns the argument in lowercase.\nstring toLower(string a)\nThe same as lower().\nstring upper(string a)\nReturns the argument in uppercase.\nstring toUpper(string a)\nThe same as upper().\nint length(a)\nReturns the length of the given string in bytes or the length of a path in hops.\nstring trim(string a)\nRemoves leading and trailing spaces.\nstring ltrim(string a)\nRemoves leading spaces.\nstring rtrim(string a)\nRemoves trailing spaces.\nstring left(string a, int count)\nReturns a substring consisting of count characters from the left side of\nstring right(string a, int count)\nReturns a substring consisting of count characters from the right side of\nstring lpad(string a, int size, string letters)\nLeft-pads string a with string letters and returns a\nstring rpad(string a, int size, string letters)\nRight-pads string a with string letters and returns a\nstring substr(string a, int pos, int count)\nReturns a substring extracting count characters starting from\nstring substring(string a, int pos, int count)\nThe same as substr().\nstring reverse(string)\nReturns a string in reverse order.\nstring replace(string a, string b, string c)\nReplaces string b in string a with string c.\nlist split(string a, string b)\nSplits string a at string b and returns a list of strings.\nconcat()\nThe concat() function requires at least two or more strings. 
All the parameters are concatenated into one string.Syntax: concat(string1,string2,...)\nconcat_ws()\nThe concat_ws() function connects two or more strings with a predefined separator.\nextract()\nextract() uses regular expression matching to retrieve a single substring or all substrings from a string.\njson_extract()\nThe json_extract() function converts the specified JSON string to map.\nData and time functions\nFunction\nDescription\nint now()\nReturns the current timestamp of the system.\ntimestamp timestamp()\nReturns the current timestamp of the system.\ndate date()\nReturns the current UTC date based on the current system.\ntime time()\nReturns the current UTC time based on the current system.\ndatetime datetime()\nReturns the current UTC date and time based on the current system.\nSchema-related functions\nFor nGQL statements\nFunction\nDescription\nid(vertex)\nReturns the ID of a vertex. The data type of the result is the same as the vertex ID.\nmap properties(vertex)\nReturns the properties of a vertex.\nmap properties(edge)\nReturns the properties of an edge.\nstring type(edge)\nReturns the edge type of an edge.\nsrc(edge)\nReturns the source vertex ID of an edge. The data type of the result is the same as the vertex ID.\ndst(edge)\nReturns the destination vertex ID of an edge. The data type of the result is the same as the vertex ID.\nint rank(edge)\nReturns the rank value of an edge.\nvertex\nReturns the information of vertices, including VIDs, tags, properties, and values.\nedge\nReturns the information of edges, including edge types, source vertices, destination vertices, ranks, properties, and values.\nvertices\nReturns the information of vertices in a subgraph. For more information, see GET SUBGRAPH.\nedges\nReturns the information of edges in a subgraph. For more information, see GET SUBGRAPH.\npath\nReturns the information of a path. 
For more information, see FIND PATH.\nFor statements compatible with openCypher\nFunction\nDescription\nid()\nReturns the ID of a vertex. The data type of the result is the same as the vertex ID.\nlist tags()\nReturns the Tag of a vertex, which serves the same purpose as labels().\nlist labels()\nReturns the Tag of a vertex, which serves the same purpose as tags(). This function is used for compatibility with openCypher syntax.\nmap properties()\nReturns the properties of a vertex or an edge.\nstring type()\nReturns the edge type of an edge.\nsrc()\nReturns the source vertex ID of an edge. The data type of the result is the same as the vertex ID.\ndst()\nReturns the destination vertex ID of an edge. The data type of the result is the same as the vertex ID.\nvertex startNode()\nVisits an edge or a path and returns its source vertex ID.\nstring endNode()\nVisits an edge or a path and returns its destination vertex ID.\nint rank()\nReturns the rank value of an edge.\nList functions\nFunction\nDescription\nkeys(expr)\nReturns a list containing the string representations for all the property names of vertices, edges, or maps.\nlabels(vertex)\nReturns the list containing all the tags of a vertex.\nnodes(path)\nReturns the list containing all the vertices in a path.\nrange(start, end [, step])\nReturns the list containing all the fixed-length steps in [start,end]. 
step is 1 by default.\nrelationships(path)\nReturns the list containing all the relationships in a path.\nreverse(list)\nReturns the list reversing the order of all elements in the original list.\ntail(list)\nReturns all the elements of the original list, excluding the first one.\nhead(list)\nReturns the first element of a list.\nlast(list)\nReturns the last element of a list.\nreduce()\nThe reduce() function applies an expression to each element in a list one by one, chains the result to the next iteration by taking it as the initial value, and returns the final result.\nType conversion functions\nFunction\nDescription\nbool toBoolean()\nConverts a string value to a boolean value.\nfloat toFloat()\nConverts an integer or string value to a floating point number.\nstring toString()\nConverts non-compound types of data, such as numbers, booleans, and so on, to strings.\nint toInteger()\nConverts a floating point or string value to an integer value.\nset toSet()\nConverts a list or set value to a set value.\nint hash()\nThe hash() function returns the hash value of the argument. The argument can be a number, a string, a list, a boolean, null, or an expression that evaluates to a value of the preceding data types.\nPredicate functions\nPredicate functions return true or false. They are most commonly used in WHERE clauses.\n( IN WHERE )\nFunction\nDescription\nexists()\nReturns true if the specified property exists in the vertex, edge or map. Otherwise, returns false.\nany()\nReturns true if the specified predicate holds for at least one element in the given list. Otherwise, returns false.\nall()\nReturns true if the specified predicate holds for all elements in the given list. Otherwise, returns false.\nnone()\nReturns true if the specified predicate holds for no element in the given list. Otherwise, returns false.\nsingle()\nReturns true if the specified predicate holds for exactly one of the elements in the given list. 
Otherwise, returns false.\nConditional expressions functions\nFunction\nDescription\nCASE\nThe CASE expression uses conditions to filter the result of an nGQL query statement. It is usually used in the YIELD and RETURN clauses. The CASE expression will traverse all the conditions. When the first condition is met, the CASE expression stops reading the conditions and returns the result. If no conditions are met, it returns the result in the ELSE clause. If there is no ELSE clause and no conditions are met, it returns NULL.\ncoalesce()\nReturns the first not null value in all expressions.\nGeneral queries statements\nMATCH\nMATCH [] RETURN [];\nPattern\nExample\nDescription\nMatch vertices\n(v)\nYou can use a user-defined variable in a pair of parentheses to represent a vertex in a pattern. For example: (v).\nMatch tags\nMATCH (v:player) RETURN v\nYou can specify a tag with : after the vertex in a pattern.\nMatch multiple tags\nMATCH (v:player:team) RETURN v\nTo match vertices with multiple tags, use colons (:).\nMatch vertex properties\nMATCH (v:player{name:\"Tim Duncan\"}) RETURN v MATCH (v) WITH v, properties(v) as props, keys(properties(v)) as kk WHERE [i in kk where props[i] == \"Tim Duncan\"] RETURN v\nYou can specify a vertex property with {: } after the tag in a pattern; or use a vertex property value to get vertices directly.\nMatch a VID.\nMATCH (v) WHERE id(v) == 'player101' RETURN v\nYou can use the VID to match a vertex. The id() function can retrieve the VID of a vertex.\nMatch multiple VIDs.\nMATCH (v:player { name: 'Tim Duncan' })--(v2) WHERE id(v2) IN [\"player101\", \"player102\"] RETURN v2\nTo match multiple VIDs, use WHERE id(v) IN [vid_list].\nMatch connected vertices\nMATCH (v:player{name:\"Tim Duncan\"})--(v2) RETURN v2.player.name AS Name\nYou can use the -- symbol to represent edges of both directions and match vertices connected by these edges. 
You can add a > or < to the -- symbol to specify the direction of an edge.\nMatch paths\nMATCH p=(v:player{name:\"Tim Duncan\"})-->(v2) RETURN p\nConnected vertices and edges form a path. You can use a user-defined variable to name a path as follows.\nMatch edges\nMATCH (v:player{name:\"Tim Duncan\"})-[e]-(v2) RETURN eMATCH ()<-[e]-() RETURN e\nBesides using --, -->, or <-- to indicate a nameless edge, you can use a user-defined variable in a pair of square brackets to represent a named edge. For example: -[e]-.\nMatch an edge type\nMATCH ()-[e:follow]-() RETURN e\nJust like vertices, you can specify an edge type with : in a pattern. For example: -[e:follow]-.\nMatch edge type properties\nMATCH (v:player{name:\"Tim Duncan\"})-[e:follow{degree:95}]->(v2) RETURN e MATCH ()-[e]->() WITH e, properties(e) as props, keys(properties(e)) as kk WHERE [i in kk where props[i] == 90] RETURN e\nYou can specify edge type properties with {: } in a pattern. For example: [e:follow{likeness:95}]; or use an edge type property value to get edges directly.\nMatch multiple edge types\nMATCH (v:player{name:\"Tim Duncan\"})-[e:follow | :serve]->(v2) RETURN e\nThe | symbol can help matching multiple edge types. For example: [e:follow|:serve]. The English colon (:) before the first edge type cannot be omitted, but the English colon before the subsequent edge type can be omitted, such as [e:follow|serve].\nMatch multiple edges\nMATCH (v:player{name:\"Tim Duncan\"})-[]->(v2)<-[e:serve]-(v3) RETURN v2, v3\nYou can extend a pattern to match multiple edges in a path.\nMatch fixed-length paths\nMATCH p=(v:player{name:\"Tim Duncan\"})-[e:follow*2]->(v2) RETURN DISTINCT v2 AS Friends\nYou can use the :* pattern to match a fixed-length path. hop must be a non-negative integer. The data type of e is the list.\nMatch variable-length paths\nMATCH p=(v:player{name:\"Tim Duncan\"})-[e:follow*1..3]->(v2) RETURN v2 AS Friends\nminHop: Optional. It represents the minimum length of the path. 
minHop: must be a non-negative integer. The default value is 1.minHop and maxHop are optional and the default value is 1 and infinity respectively. The data type of e is the list.\nMatch variable-length paths with multiple edge types\nMATCH p=(v:player{name:\"Tim Duncan\"})-[e:follow | serve*2]->(v2) RETURN DISTINCT v2\nYou can specify multiple edge types in a fixed-length or variable-length pattern. In this case, hop, minHop, and maxHop take effect on all edge types. The data type of e is the list.\nRetrieve vertex or edge information\nMATCH (v:player{name:\"Tim Duncan\"}) RETURN vMATCH (v:player{name:\"Tim Duncan\"})-[e]->(v2) RETURN e\nUse RETURN { | } to retrieve all the information of a vertex or an edge.\nRetrieve VIDs\nMATCH (v:player{name:\"Tim Duncan\"}) RETURN id(v)\nUse the id() function to retrieve VIDs.\nRetrieve tags\nMATCH (v:player{name:\"Tim Duncan\"}) RETURN labels(v)\nUse the labels() function to retrieve the list of tags on a vertex.To retrieve the nth element in the labels(v) list, use labels(v)[n-1].\nRetrieve a single property on a vertex or an edge\nMATCH (v:player{name:\"Tim Duncan\"}) RETURN v.player.age\nUse RETURN { | }. 
to retrieve a single property.Use AS to specify an alias for a property.\nRetrieve all properties on a vertex or an edge\nMATCH p=(v:player{name:\"Tim Duncan\"})-[]->(v2) RETURN properties(v2)\nUse the properties() function to retrieve all properties on a vertex or an edge.\nRetrieve edge types\nMATCH p=(v:player{name:\"Tim Duncan\"})-[e]->() RETURN DISTINCT type(e)\nUse the type() function to retrieve the matched edge types.\nRetrieve paths\nMATCH p=(v:player{name:\"Tim Duncan\"})-[*3]->() RETURN p\nUse RETURN to retrieve all the information of the matched paths.\nRetrieve vertices in a path\nMATCH p=(v:player{name:\"Tim Duncan\"})-[]->(v2) RETURN nodes(p)\nUse the nodes() function to retrieve all vertices in a path.\nRetrieve edges in a path\nMATCH p=(v:player{name:\"Tim Duncan\"})-[]->(v2) RETURN relationships(p)\nUse the relationships() function to retrieve all edges in a path.\nRetrieve path length\nMATCH p=(v:player{name:\"Tim Duncan\"})-[*..2]->(v2) RETURN p AS Paths, length(p) AS Length\nUse the length() function to retrieve the length of a path.\nOPTIONAL MATCH\nPattern\nExample\nDescription\nMatches patterns against your graph database, just like MATCH does.\nMATCH (m)-[]->(n) WHERE id(m)==\"player100\" OPTIONAL MATCH (n)-[]->(l) RETURN id(m),id(n),id(l)\nIf no matches are found, OPTIONAL MATCH will use a null for missing parts of the pattern.\nLOOKUP\nLOOKUP ON { | } \n[WHERE [AND ...]] \nYIELD [AS ]\nPattern\nExample\nDescription\nRetrieve vertices\nLOOKUP ON player WHERE player.name == \"Tony Parker\" YIELD player.name AS name, player.age AS age\nThe following example returns vertices whose name is Tony Parker and the tag is player.\nRetrieve edges\nLOOKUP ON follow WHERE follow.degree == 90 YIELD follow.degree\nReturns edges whose degree is 90 and the edge type is follow.\nList vertices with a tag\nLOOKUP ON player YIELD properties(vertex),id(vertex)\nShows how to retrieve the VID of all vertices tagged with player.\nList edges with an edge 
types\nLOOKUP ON follow YIELD edge AS e\nShows how to retrieve the source Vertex IDs, destination vertex IDs, and ranks of all edges of the follow edge type.\nCount the numbers of vertices or edges\nLOOKUP ON player YIELD id(vertex)| YIELD COUNT(*) AS Player_Count\nShows how to count the number of vertices tagged with player.\nCount the numbers of edges\nLOOKUP ON follow YIELD edge as e| YIELD COUNT(*) AS Like_Count\nShows how to count the number of edges of the follow edge type.\nGO\nGO [[ TO] {STEP|STEPS} ] FROM \nOVER [{REVERSELY | BIDIRECT}]\n[ WHERE ]\nYIELD [DISTINCT] \n[{SAMPLE | LIMIT }]\n[| GROUP BY {col_name | expr | position} YIELD ]\n[| ORDER BY [{ASC | DESC}]]\n[| LIMIT [,] ]\nExample\nDescription\nGO FROM \"player102\" OVER serve YIELD dst(edge)\nReturns the teams that player 102 serves.\nGO 2 STEPS FROM \"player102\" OVER follow YIELD dst(edge)\nReturns the friends of player 102 with 2 hops.\nGO FROM \"player100\", \"player102\" OVER serve WHERE properties(edge).start_year > 1995 YIELD DISTINCT properties($$).name AS team_name, properties(edge).start_year AS start_year, properties($^).name AS player_name\nAdds a filter for the traversal.\nGO FROM \"player100\" OVER follow, serve YIELD properties(edge).degree, properties(edge).start_year\nThe following example traverses along with multiple edge types. 
If there is no value for a property, the output is NULL.\nGO FROM \"player100\" OVER follow REVERSELY YIELD src(edge) AS destination\nThe following example returns the neighbor vertices in the incoming direction of player 100.\nGO FROM \"player100\" OVER follow REVERSELY YIELD src(edge) AS id | GO FROM $-.id OVER serve WHERE properties($^).age > 20 YIELD properties($^).name AS FriendOf, properties($$).name AS Team\nThe following example retrieves the friends of player 100 and the teams that they serve.\nGO FROM \"player102\" OVER follow YIELD dst(edge) AS both\nThe following example returns all the neighbor vertices of player 102.\nGO 2 STEPS FROM \"player100\" OVER follow YIELD src(edge) AS src, dst(edge) AS dst, properties($$).age AS age | GROUP BY $-.dst YIELD $-.dst AS dst, collect_set($-.src) AS src, collect($-.age) AS age\nThe following example the outputs according to age.\nFETCH\nFetch vertex properties\nFETCH PROP ON {[, tag_name ...] | *} \n [, vid ...] \nYIELD [AS ]\nExample\nDescription\nFETCH PROP ON player \"player100\" YIELD properties(vertex)\nSpecify a tag in the FETCH statement to fetch the vertex properties by that tag.\nFETCH PROP ON player \"player100\" YIELD player.name AS name\nUse a YIELD clause to specify the properties to be returned.\nFETCH PROP ON player \"player101\", \"player102\", \"player103\" YIELD properties(vertex)\nSpecify multiple VIDs (vertex IDs) to fetch properties of multiple vertices. Separate the VIDs with commas.\nFETCH PROP ON player, t1 \"player100\", \"player103\" YIELD properties(vertex)\nSpecify multiple tags in the FETCH statement to fetch the vertex properties by the tags. 
Separate the tags with commas.\nFETCH PROP ON * \"player100\", \"player106\", \"team200\" YIELD properties(vertex)\nSet an asterisk symbol * to fetch properties by all tags in the current graph space.\nFetch edge properties\nFETCH PROP ON -> [@] [, -> ...]\nYIELD ;\nExample\nDescription\nFETCH PROP ON serve \"player100\" -> \"team204\" YIELD properties(edge)\nThe following statement fetches all the properties of the serve edge that connects vertex \"player100\" and vertex \"team204\".\nFETCH PROP ON serve \"player100\" -> \"team204\" YIELD serve.start_year\nUse a YIELD clause to fetch specific properties of an edge.\nFETCH PROP ON serve \"player100\" -> \"team204\", \"player133\" -> \"team202\" YIELD properties(edge)\nSpecify multiple edge patterns ( -> [@]) to fetch properties of multiple edges. Separate the edge patterns with commas.\nFETCH PROP ON serve \"player100\" -> \"team204\"@1 YIELD properties(edge)\nTo fetch on an edge whose rank is not 0, set its rank in the FETCH statement.\nGO FROM \"player101\" OVER follow YIELD follow._src AS s, follow._dst AS d | FETCH PROP ON follow $-.s -> $-.d YIELD follow.degree\nThe following statement returns the degree values of the follow edges that start from vertex \"player101\".\n$var = GO FROM \"player101\" OVER follow YIELD follow._src AS s, follow._dst AS d; FETCH PROP ON follow $var.s -> $var.d YIELD follow.degree\nYou can use user-defined variables to construct similar queries.\nSHOW\nStatement\nSyntax\nExample\nDescription\nSHOW CHARSET\nSHOW CHARSET\nSHOW CHARSET\nShows the available character sets.\nSHOW COLLATION\nSHOW COLLATION\nSHOW COLLATION\nShows the collations supported by NebulaGraph.\nSHOW CREATE SPACE\nSHOW CREATE SPACE \nSHOW CREATE SPACE basketballplayer\nShows the creating statement of the specified graph space.\nSHOW CREATE TAG/EDGE\nSHOW CREATE {TAG | EDGE }\nSHOW CREATE TAG player\nShows the basic information of the specified tag.\nSHOW HOSTS\nSHOW HOSTS [GRAPH | STORAGE | META]\nSHOW HOSTSSHOW 
HOSTS GRAPH\nShows the host and version information of Graph Service, Storage Service, and Meta Service.\nSHOW INDEX STATUS\nSHOW {TAG | EDGE} INDEX STATUS\nSHOW TAG INDEX STATUS\nShows the status of jobs that rebuild native indexes, which helps check whether a native index is successfully rebuilt or not.\nSHOW INDEXES\nSHOW {TAG | EDGE} INDEXES\nSHOW TAG INDEXES\nShows the names of existing native indexes.\nSHOW PARTS\nSHOW PARTS []\nSHOW PARTS\nShows the information of a specified partition or all partitions in a graph space.\nSHOW ROLES\nSHOW ROLES IN \nSHOW ROLES in basketballplayer\nShows the roles that are assigned to a user account.\nSHOW SNAPSHOTS\nSHOW SNAPSHOTS\nSHOW SNAPSHOTS\nShows the information of all the snapshots.\nSHOW SPACES\nSHOW SPACES\nSHOW SPACES\nShows existing graph spaces in NebulaGraph.\nSHOW STATS\nSHOW STATS\nSHOW STATS\nShows the statistics of the graph space collected by the latest STATS job.\nSHOW TAGS/EDGES\nSHOW TAGS | EDGES\nSHOW TAGS,SHOW EDGES\nShows all the tags in the current graph space.\nSHOW USERS\nSHOW USERS\nSHOW USERS\nShows the user information.\nSHOW SESSIONS\nSHOW SESSIONS\nSHOW SESSIONS\nShows the information of all the sessions.\nSHOW SESSIONS\nSHOW SESSION \nSHOW SESSION 1623304491050858\nShows a specified session with its ID.\nSHOW QUERIES\nSHOW [ALL] QUERIES\nSHOW QUERIES\nShows the information of working queries in the current session.\nSHOW META LEADER\nSHOW META LEADER\nSHOW META LEADER\nShows the information of the leader in the current Meta cluster.\nClauses and options\nClause\nSyntax\nExample\nDescription\nGROUP BY\nGROUP BY YIELD , \nGO FROM \"player100\" OVER follow BIDIRECT YIELD $$.player.name as Name | GROUP BY $-.Name YIELD $-.Name as Player, count(*) AS Name_Count\nFinds all the vertices connected directly to vertex \"player100\", groups the result set by player names, and counts how many times the name shows up in the result set.\nLIMIT\nYIELD [| LIMIT [,] ]\nGO FROM \"player100\" OVER follow 
REVERSELY YIELD $$.player.name AS Friend, $$.player.age AS Age | ORDER BY $-.Age, $-.Friend | LIMIT 1, 3\nReturns the 3 rows of data starting from the second row of the sorted output.\nSKIP\nRETURN [SKIP ] [LIMIT ]\nMATCH (v:player{name:\"Tim Duncan\"}) --> (v2) RETURN v2.player.name AS Name, v2.player.age AS Age ORDER BY Age DESC SKIP 1\nSKIP can be used alone to set the offset and return the data after the specified position.\nSAMPLE\n SAMPLE ;\nGO 3 STEPS FROM \"player100\" OVER * YIELD properties($$).name AS NAME, properties($$).age AS Age SAMPLE [1,2,3];\nTakes samples evenly in the result set and returns the specified amount of data.\nORDER BY\n ORDER BY [ASC | DESC] [, [ASC | DESC] ...]\nFETCH PROP ON player \"player100\", \"player101\", \"player102\", \"player103\" YIELD player.age AS age, player.name AS name | ORDER BY $-.age ASC, $-.name DESC\nThe ORDER BY clause specifies the order of the rows in the output.\nRETURN\nRETURN {||
${log}
+ {log} +
{item}